Modify file names and attributes

2024-06-05 22:10:21 +08:00
parent 7c9a900b95
commit cbeba48da0
21 changed files with 1466 additions and 839 deletions

View File

@@ -3,6 +3,7 @@ from collections import defaultdict
from tqdm import tqdm
from gurobipy import *
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
import os
import time
@@ -14,9 +15,13 @@ import argparse
import joblib
import pickle
import warnings
import heapq
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
# machine parameters
max_head_index, max_slot_index = 6, 120
@@ -29,7 +34,7 @@ head_nozzle = ['' for _ in range(max_head_index)] # nozzles already assigned to each head
slotf1_pos, slotr1_pos = [-31.267, 44.], [807., 810.545] # positions of F1 (leftmost slot of the front base) and R1 (rightmost slot of the rear base)
fix_camera_pos = [269.531, 694.823] # fixed-camera position
anc_marker_pos = [336.457, 626.230] # ANC reference-point position
stopper_pos = [535.150, 124.738] # stopper position
stopper_pos = [665.150, 124.738] # stopper position
# algorithm weight parameters
e_nz_change, e_gang_pick = 4, 0.6
@@ -39,12 +44,14 @@ head_rotary_velocity = 8e-5 # rotation time of the placement head R axis
x_max_velocity, y_max_velocity = 1.4, 1.2
x_max_acceleration, y_max_acceleration = x_max_velocity / 0.079, y_max_velocity / 0.079
# widths of different feeder types
feeder_width = {'SM8': (7.25, 7.25), 'SM12': (7.00, 20.00), 'SM16': (7.00, 22.00),
'SM24': (7.00, 29.00), 'SM32': (7.00, 44.00)}
# TODO: widths of different feeder types
feeder_width = {'SM8': (7.25, 7.25), 'SM12': (7.25, 7.25), 'SM16': (7.25, 7.25),
'SM24': (7.25, 7.25), 'SM32': (7.25, 7.25)}
# feeder_width = {'SM8': (7.25, 7.25), 'SM12': (7.00, 20.00), 'SM16': (7.00, 22.00),
# 'SM24': (7.00, 29.00), 'SM32': (7.00, 44.00)}
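A minimal sketch of how these width tuples can translate into occupied base slots, assuming the two entries are the widths on either side of the pick position and that slots are spaced one pitch apart (the pitch value below is an assumption; the real slot_interval constant is defined elsewhere in this module):

def occupied_slots(feeder_type, slot_interval=15.0):
    # assumed: total feeder width divided by the slot pitch, at least one slot
    left, right = feeder_width[feeder_type]
    return max(1, math.ceil((left + right) / slot_interval))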
# limit on the number of available nozzles
nozzle_limit = {'CN065': 6, 'CN040': 6, 'CN220': 6, 'CN400': 6, 'CN140': 6}
nozzle_limit = {'CN065': 6, 'CN040': 6, 'CN020': 6, 'CN400': 6, 'CN140': 6}
# timing parameters
t_cycle = 0.3
@@ -61,21 +68,31 @@ T_pp, T_tr, T_nc, T_pl = 2, 5, 25, 0
class OptInfo:
def __init__(self):
self.total_time = .0 # total assembly time
self.total_points = .0 # total number of placement points
self.total_points = 0 # total number of placement points
self.total_components = 0 # total number of component types
self.pickup_time = .0 # movement time during pickup
self.round_time = .0 # movement time for trips between the feeder base and the board
self.place_time = .0 # movement time during placement
self.operation_time = .0 # time of mechanical actions such as pickup, placement and nozzle changes
self.cycle_counter = 0 # number of cycles
self.nozzle_change_counter = 0 # number of nozzle changes
self.anc_round_counter = 0 # number of trips to the ANC
self.pickup_counter = 0 # number of pickup operations
self.cycle_counter = 0 # number of cycles
self.nozzle_change_counter = 0 # number of nozzle changes
self.anc_round_counter = 0 # number of trips to the ANC
self.pickup_counter = 0 # number of pickup operations
self.total_distance = .0 # total travel distance
self.place_distance = .0 # placement travel distance
self.pickup_distance = .0 # pickup travel distance
self.total_distance = .0 # total travel distance
self.place_distance = .0 # placement travel distance
self.pickup_distance = .0 # pickup travel distance
def print(self):
print('-Cycle counter: {}'.format(self.cycle_counter))
print(f'-Nozzle change counter: {self.nozzle_change_counter: d}')
print(f'-ANC round: {self.anc_round_counter: d}')
print(f'-Pick operation counter: {self.pickup_counter: d}')
print(f'-Pick time: {self.pickup_time: .3f}, Pick distance: {self.pickup_distance: .3f}')
print(f'-Place time: {self.place_time: .3f}, Place distance: {self.place_distance: .3f}')
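A hypothetical usage sketch of OptInfo (all counter values invented for illustration):

info = OptInfo()
info.cycle_counter, info.nozzle_change_counter = 120, 8
info.anc_round_counter, info.pickup_counter = 8, 260
info.pickup_time, info.pickup_distance = 35.2, 40413.7
info.place_time, info.place_distance = 48.9, 52711.0
info.print()  # prints the counters and times in the format defined above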
def axis_moving_time(distance, axis=0):
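The function body is elided by this hunk; a plausible sketch under a trapezoidal velocity-profile assumption, reusing the axis limits defined above (illustrative only, not the repository's actual implementation):

def axis_moving_time_sketch(distance, axis=0):
    # pick the kinematic limits of the requested axis (0 = X, 1 = Y)
    v_max = x_max_velocity if axis == 0 else y_max_velocity
    a_max = x_max_acceleration if axis == 0 else y_max_acceleration
    distance = abs(distance)
    d_ramp = v_max ** 2 / a_max  # distance spent accelerating to v_max and braking back
    if distance <= d_ramp:
        return 2 * math.sqrt(distance / a_max)  # triangular profile, v_max never reached
    return 2 * v_max / a_max + (distance - d_ramp) / v_max  # trapezoidal profile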
@@ -145,12 +162,13 @@ def feeder_assignment(component_data, pcb_data, component_result, cycle_result):
feeder_slot_result, feeder_group_result = [], []
feeder_limit = defaultdict(int)
for component in range(len(component_data)):
feeder_limit[component] = component_data.loc[component]['feeder-limit']
feeder_limit[component] = component_data.loc[component].fdn
for component_cycle in component_result:
new_feeder_group = []
for component in component_cycle:
if component == -1 or feeder_limit[component] == 0 or new_feeder_group.count(component) >= feeder_limit[component]:
if component == -1 or feeder_limit[component] == 0 or new_feeder_group.count(component) >= feeder_limit[
component]:
new_feeder_group.append(-1)
else:
new_feeder_group.append(component)
@@ -401,8 +419,11 @@ def dynamic_programming_cycle_path(pcb_data, cycle_placement, assigned_feeder):
@timer_wrapper
def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result, hinter=True):
def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
hinter=True):
placement_result, head_sequence_result = [], []
if len(pcb_data) == 0:
return placement_result, head_sequence_result
mount_point_index = [[] for _ in range(len(component_data))]
mount_point_pos = [[] for _ in range(len(component_data))]
@@ -1037,7 +1058,7 @@ def convert_line_assigment(pcb_data, component_data, assignment_result):
# === averagely assign available feeder ===
for part_index, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
feeder_limit = data.fdn
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
for machine_index in range(machine_number):
@@ -1047,23 +1068,30 @@ def convert_line_assigment(pcb_data, component_data, assignment_result):
if feeder_points[machine_index] == 0:
continue
arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
partial_component_data[machine_index].loc[part_index, 'feeder-limit'] = arg_feeder
feeder_limit -= arg_feeder
for machine_index in range(machine_number):
if feeder_limit <= 0:
break
if feeder_points[machine_index] == 0:
continue
partial_component_data[machine_index].loc[part_index, 'feeder-limit'] += 1
partial_component_data[machine_index].loc[part_index, 'fdn'] = 1
feeder_limit -= 1
while feeder_limit:
assign_machine = None
for machine_index in range(machine_number):
if feeder_limit <= 0:
break
if feeder_points[machine_index] == 0:
continue
if assign_machine is None or feeder_points[machine_index] / \
partial_component_data[machine_index].loc[part_index].fdn > feeder_points[
assign_machine] / partial_component_data[assign_machine].loc[part_index].fdn:
assign_machine = machine_index
partial_component_data[assign_machine].loc[part_index, 'fdn'] += 1
feeder_limit -= 1
assert assign_machine is not None
for machine_index in range(machine_number):
if feeder_points[machine_index] > 0:
assert partial_component_data[machine_index].loc[part_index, 'feeder-limit'] > 0
assert partial_component_data[machine_index].loc[part_index].fdn > 0
# === assign placements ===
part2idx = defaultdict(int)
@@ -1127,57 +1155,23 @@ def convert_line_assigment(pcb_data, component_data, assignment_result):
partial_component_data[idx].loc[part_index, 'points'] += 1
partial_pcb_data[idx] = pd.concat([partial_pcb_data[idx], pd.DataFrame(data).T])
# === adjust the number of available feeders for single optimization separately ===
# for machine_index, data in partial_pcb_data.items():
# part_info = [] # part info list(part index, part points, available feeder-num, upper feeder-num)
# for part_index, cp_data in partial_component_data[machine_index].iterrows():
# if assignment_result[machine_index][part_index]:
# part_info.append(
# [part_index, assignment_result[machine_index][part_index], 1, cp_data['feeder-limit']])
#
# part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
# start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
# while start_index < len(part_info):
# assign_part_point, assign_part_index = [], []
# for idx_ in range(start_index, end_index + 1):
# for _ in range(part_info[idx_][2]):
# assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
# assign_part_index.append(idx_)
#
# variance = np.std(assign_part_point)
# while start_index <= end_index:
# part_info_index = assign_part_index[np.argmax(assign_part_point)]
#
# if part_info[part_info_index][2] < part_info[part_info_index][3]: # upper limit on the number of feeders
# part_info[part_info_index][2] += 1
# end_index -= 1
#
# new_assign_part_point, new_assign_part_index = [], []
# for idx_ in range(start_index, end_index + 1):
# for _ in range(part_info[idx_][2]):
# new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
# new_assign_part_index.append(idx_)
#
# new_variance = np.std(new_assign_part_point)
# if variance < new_variance:
# part_info[part_info_index][2] -= 1
# end_index += 1
# break
#
# variance = new_variance
# assign_part_index, assign_part_point = new_assign_part_index.copy(), new_assign_part_point.copy()
# else:
# break
#
# start_index = end_index + 1
# end_index = min(start_index + max_head_index - 1, len(part_info) - 1)
#
# max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
# for info in part_info:
# partial_component_data[machine_index].loc[info[0], 'feeder-limit'] = math.ceil(info[2] / max_avl_feeder)
for machine_index in range(machine_number):
partial_component_data[machine_index] = partial_component_data[machine_index][
partial_component_data[machine_index]['points'] != 0].reset_index(drop=True)
return partial_pcb_data, partial_component_data
def random_division(num, div):
assert num >= div
res = [1 for _ in range(div)]
while sum(res) < num:
pos = random.randint(0, div - 1)
val = random.randint(1, num - sum(res))
res[pos] += val
return res
def list_range(start, end=None):
return list(range(start)) if end is None else list(range(start, end))
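Hypothetical usage of the two helpers above (outputs depend on the random seed):

print(random_division(10, 3))  # e.g. [7, 2, 1]: three positive parts summing to 10
print(list_range(4))           # [0, 1, 2, 3]
print(list_range(2, 5))        # [2, 3, 4]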

View File

@@ -1,11 +1,11 @@
# provides the external interface
from base_optimizer.optimizer_scanbased import *
from base_optimizer.optimizer_celldivision import *
from base_optimizer.optimizer_hybridgenetic import *
from base_optimizer.optimizer_feederpriority import *
from base_optimizer.optimizer_aggregation import *
from base_optimizer.optimizer_twophase import *
from base_optimizer.optimizer_mathmodel import *
from base_optimizer.smopt_scanbased import *
from base_optimizer.smopt_celldivision import *
from base_optimizer.smopt_hybridgenetic import *
from base_optimizer.smopt_feederpriority import *
from base_optimizer.smopt_aggregation import *
from base_optimizer.smopt_twophase import *
from base_optimizer.smopt_mathmodel import *
from base_optimizer.result_analysis import *
@@ -25,17 +25,17 @@ def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, me
elif method == 'hybrid-genetic': # pickup-group-based hybrid genetic algorithm
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_hybrid_genetic(
pcb_data, component_data, hinter=False)
pcb_data, component_data, hinter=hinter)
elif method == 'aggregation': # batch-level integer programming + heuristic
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_aggregation(
component_data, pcb_data)
elif method == 'genetic-scanning':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_genetic_scanning(
component_data, pcb_data, hinter=False)
component_data, pcb_data, hinter=hinter)
elif method == 'mip-model':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_mathmodel(
component_data, pcb_data, hinter=True)
component_data, pcb_data, hinter=hinter)
elif method == "two-phase":
component_result, feeder_slot_result, cycle_result = gurobi_optimizer(pcb_data, component_data, feeder_data,
initial=True, partition=True,
@@ -46,22 +46,15 @@ def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, me
else:
raise ValueError('machine optimizer method ' + method + ' does not exist')
print('----- Placement machine ' + str(machine_index) + ' ----- ')
# estimate the placement time
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence, hinter=False)
if hinter:
optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=True, component_hinter=True, feeder_hinter=True)
info.print()
print('----- Placement machine ' + str(machine_index) + ' ----- ')
print('-Cycle counter: {}'.format(info.cycle_counter))
print(f'-Nozzle change counter: {info.nozzle_change_counter: d}')
print(f'-ANC round: {info.anc_round_counter: d}')
print(f'-Pick operation counter: {info.pickup_counter: d}')
print(f'-Pick time: {info.pickup_time: .3f}, distance: {info.pickup_distance: .3f}')
print(f'-Place time: {info.place_time: .3f}, distance: {info.place_distance: .3f}')
print('------------------------------ ')
print('------------------------------ ')
return info
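A usage sketch mirroring the call made by the training script later in this commit (feeder-scan method with an empty feeder table):

info = base_optimizer(1, pcb_data, component_data,
                      feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
                      method='feeder-scan', hinter=True)
print(info.total_time)  # estimated assembly time of machine 1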

View File

@@ -40,7 +40,6 @@ def convert_pcbdata_to_result(pcb_data, component_data):
if slot == 'A':
slot, part = 0, pcb_data.loc[point_cnt].part
else:
slot, part = int(slot[1:]), pcb_data.loc[point_cnt].fdr.split(' ', 1)[1]
head = pcb_data.loc[point_cnt].hd - 1
@@ -423,7 +422,8 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
component_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
part = component_data.loc[index]['part']
component_assign.loc[cycle, 'H{}'.format(head + 1)] = 'C' + str(index)
component_assign.loc[cycle, 'H{}'.format(head + 1)] = part
# component_assign.loc[cycle, 'H{}'.format(head + 1)] = 'C' + str(index)
print(component_assign)
print('')
@@ -450,6 +450,7 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
placement_result=None, head_sequence=None, hinter=False):
# === optimization result parameters ===
info = OptInfo()
# === validation ===
info.total_points = 0
for cycle, components in enumerate(component_result):
@@ -461,7 +462,7 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
if info.total_points != len(pcb_data):
warning_info = 'the number of placement points does not match the PCB data. '
warnings.warn(warning_info, UserWarning)
return 0.
return OptInfo()
if placement_result:
total_points = info.total_points
@@ -475,7 +476,7 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
warnings.warn(
'the component assignment result and the placement result are not consistent. ',
UserWarning)
return 0.
return OptInfo()
feeder_arrangement = defaultdict(set)
for cycle, feeder_slots in enumerate(feeder_slot_result):
@@ -484,8 +485,9 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
continue
feeder_arrangement[component_result[cycle][head]].add(slot)
info.total_components = len(feeder_arrangement.keys())
for part, data in component_data.iterrows():
if part in feeder_arrangement.keys() and data['feeder-limit'] < len(feeder_arrangement[part]):
if part in feeder_arrangement.keys() and data.fdn < len(feeder_arrangement[part]):
warning_info = 'the number of arranged feeders for [' + data['part'] + '] exceeds the quantity limit'
warnings.warn(warning_info, UserWarning)
return OptInfo()

View File

@@ -21,8 +21,8 @@ def feeder_priority_assignment(component_data, pcb_data, hinter=True):
info = placement_info_evaluation(component_data, pcb_data, component_assign, cycle_assign,
feeder_slot_assign, None, None, hinter=False)
val = 0.4 * info.cycle_counter + 2.15 * info.nozzle_change_counter + 0.11 * info.pickup_counter \
+ 0.005 * info.anc_round_counter
val = 0.356 * info.cycle_counter + 0.949 * info.nozzle_change_counter + 0.159 * info.pickup_counter \
+ 0.002 * info.pickup_distance
if feeder_allocate_val is None or val < feeder_allocate_val:
feeder_allocate_val = val
component_result, cycle_result, feeder_slot_result = component_assign, cycle_assign, feeder_slot_assign
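The new surrogate objective restated as a helper (weights exactly as in this commit; the function name is invented for illustration):

def feeder_allocate_objective(info):
    # fitted surrogate for assembly time: cycles, nozzle changes, pickups, pick travel
    return (0.356 * info.cycle_counter + 0.949 * info.nozzle_change_counter
            + 0.159 * info.pickup_counter + 0.002 * info.pickup_distance)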
@@ -91,14 +91,17 @@ def feeder_nozzle_pattern(component_data):
for _ in range(head):
nozzle_pattern_list[-1][head_assign_indexes[-idx]] = nozzle
idx += 1
nozzle_points.pop(min_points_nozzle)
# nozzle_pattern_list = []
# nozzle_pattern_list.append(['CN220', 'CN220', 'CN065', 'CN065', 'CN140', 'CN140'])
return nozzle_pattern_list
def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figure=False, hinter=True):
feeder_points, feeder_division_points = defaultdict(int), defaultdict(int) # number of placement points per feeder
mount_center_pos = defaultdict(float)
feeder_center_pos = defaultdict(float)
feeder_limit, feeder_arrange = defaultdict(int), defaultdict(int)
part_nozzle = defaultdict(str)
@@ -109,7 +112,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
for idx, data in component_data.iterrows():
component_index[data.part] = idx
feeder_limit[idx] = data['feeder-limit']
feeder_limit[idx] = data.fdn
feeder_arrange[idx] = 0
for _, data in pcb_data.iterrows():
@@ -118,7 +121,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
part_index = component_index[part]
feeder_points[part_index] += 1
mount_center_pos[part_index] += ((pos - mount_center_pos[part_index]) / feeder_points[part_index])
feeder_center_pos[part_index] += ((pos - feeder_center_pos[part_index]) / feeder_points[part_index])
part_nozzle[part_index] = component_data.loc[part_index].nz
for part_index, points in feeder_points.items():
@@ -198,8 +201,8 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
if len(tmp_nozzle_component[nozzle]) == 0:
continue
part = max(tmp_nozzle_component[nozzle],
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x] if
tmp_feeder_points[x] != 0 else 0)
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x]
if tmp_feeder_points[x] != 0 else 0)
index_ = tmp_nozzle_component[nozzle].index(part)
if max_points < tmp_nozzle_component_points[nozzle][index_]:
max_points, nozzle_assign = tmp_nozzle_component_points[nozzle][index_], nozzle
@@ -210,8 +213,8 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
if len(tmp_nozzle_component[nozzle_assign]) == 0:
# no component available for the nozzle type of the current head; push the component planned for assignment onto the stack
part = max(tmp_feeder_points.keys(),
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x] if tmp_feeder_limit[
x] != 0 else 0)
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x]
if tmp_feeder_limit[x] != 0 else 0)
for nozzle, component_list in tmp_nozzle_component.items():
if part in component_list:
nozzle_assign = nozzle
@@ -227,7 +230,6 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
tmp_feeder_limit[x] != 0 else 0))
part = tmp_nozzle_component[nozzle_assign][index_]
feeder_type = component_data.loc[part].fdr
extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval, 1
slot_overlap = False
@@ -341,7 +343,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
for head, feeder_ in enumerate(feeder_assign):
if feeder_ < 0:
continue
average_slot.append((mount_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1)
average_slot.append((feeder_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1)
if nozzle_pattern and component_data.loc[feeder_].nz != nozzle_pattern[head]:
nozzle_change_counter += 1
@@ -499,7 +501,9 @@ def feeder_base_scan(component_data, pcb_data, feeder_data):
raise ValueError(info)
component_points[idx] = data.points
component_index[data.part] = idx
if len(feeder_assign_check) != len(component_points) - component_points.count(0):
print(feeder_assign_check)
print(component_points)
assert len(feeder_assign_check) == len(component_points) - component_points.count(0) # all feeders have been assigned slots
mount_center_slot = defaultdict(float)

View File

@@ -313,7 +313,7 @@ def optimizer_hybrid_genetic(pcb_data, component_data, hinter=True):
idx = component_data[component_data['part'] == part].index.tolist()[0]
nozzle = component_data.loc[idx]['nz']
component_feeder_limit[part] = component_data.loc[idx]['feeder-limit']
component_feeder_limit[part] = component_data.loc[idx].fdn
component_points[part] += 1
if nozzle_components[nozzle].count(part) < component_feeder_limit[part]:
nozzle_components[nozzle].append(part)

View File

@@ -1,10 +1,6 @@
from base_optimizer.optimizer_common import *
def list_range(start, end=None):
return list(range(start)) if end is None else list(range(start, end))
def head_task_model(component_data, pcb_data, hinter=True):
mdl = Model('pick_route')

View File

@@ -1,8 +1,9 @@
import copy
from base_optimizer.optimizer_common import *
def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_feeder_data=True, cp_auto_register=False):
# read the PCB data
def load_data(filename: str, load_feeder=False, auto_register=True):
filename = 'data/' + filename
part_content, step_content = False, False
part_start_line, step_start_line, part_end_line, step_end_line = -1, -1, -1, -1
@@ -28,89 +29,152 @@ def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_fee
if part_content:
part_end_line = line_counter
elif step_content:
else:
step_end_line = line_counter
pcb_data = pd.DataFrame(
file_data = pd.DataFrame(
pd.read_csv(filepath_or_buffer=filename, skiprows=step_start_line + 1, nrows=step_end_line - step_start_line + 1,
sep='\t', header=None))
if len(pcb_data.columns) <= 17:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar",
if len(file_data.columns) == 22:
data_col = ["machine", "bl", "ref", "x", "y", "z", "r", "part", "desc", "group", "fdr", "nz", "hd", "cs", "cy",
"sk", "ar", "fid", "pop", "pl", "lv", "pr"]
elif len(file_data.columns) <= 17:
data_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar",
"pl", "lv"]
elif len(pcb_data.columns) <= 18:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
elif len(file_data.columns) <= 18:
data_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
"pl", "lv"]
else:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
data_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
"", "pl", "lv"]
pcb_data.columns = step_col
pcb_data = pcb_data.dropna(axis=1)
file_data.columns = data_col
pcb_data, component_data, feeder_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame), defaultdict(
pd.DataFrame)
# coordinate system handling
# pcb_data = pcb_data.sort_values(by = ['x', 'y'], ascending = True)
# pcb_data["x"] = pcb_data["x"].apply(lambda x: -100+x)
# line_data = line_data.dropna(axis=1)
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "pl", "lv"]
machine_name = defaultdict(int)
for _, data in file_data.iterrows():
if "machine" in file_data.columns:
if data['machine'] not in machine_name.keys():
machine_name[data['machine']] = len(machine_name)
# registered-component check
part_feeder_assign = defaultdict(set)
part_col = ["part", "fdr", "nz", 'feeder-limit']
machine_index = machine_name[data['machine']]
else:
machine_index = 0
pcb_data[machine_index] = pcb_data[machine_index]._append(data[step_col], ignore_index=True)
part_col = ["part", "fdr", "nz", 'fdn']
try:
if part_start_line != -1:
component_data = pd.DataFrame(
part_data = pd.DataFrame(
pd.read_csv(filepath_or_buffer=filename, sep='\t', header=None, skiprows=part_start_line + 1,
nrows=part_end_line - part_start_line - 1))
component_data.columns = part_col
part_data.columns = part_col
else:
component_data = pd.DataFrame(columns=part_col)
part_data = pd.DataFrame(columns=part_col)
except Exception:
component_data = pd.DataFrame(columns=part_col)
part_data = pd.DataFrame(columns=part_col)
component_data['points'] = 0
part_col.append('points')
for _, data in pcb_data.iterrows():
part, nozzle = data.part, data.nz.split(' ')[1]
slot = data['fdr'].split(' ')[0]
if part not in component_data['part'].values:
if not cp_auto_register:
raise Exception("unregistered component: " + component_data['part'].values)
else:
component_data = pd.concat([component_data, pd.DataFrame(
[part, '', 'SM8', nozzle, default_feeder_limit, 0], index=part_col).T],
ignore_index=True)
warning_info = 'register component ' + part + ' with default feeder type'
part_data['points'] = 0
part_col = ["part", "fdr", "nz", 'fdn', 'points']
machine_num = len(pcb_data)
for machine_index in range(machine_num):
component_data[machine_index] = pd.DataFrame(columns=part_col)
component_slot = defaultdict(set)
for idx, data in pcb_data[machine_index].iterrows():
if (pos := data.fdr.find('F')) != 0:
pcb_data[machine_index].loc[idx, 'fdr'] = data.fdr[pos:pos + 1:1] + data.fdr[pos + 2::]
if (pos := data.nz.find('F')) != -1:
pcb_data[machine_index].loc[idx, 'nz'] = data.nz[0:pos:1] + data.nz[pos + 1::]
if isinstance(data.hd, str) and ((pos := data.hd.find('F')) != -1):
pcb_data[machine_index].loc[idx, 'hd'] = int(data.hd[pos + 2::])
part, nozzle = data.part, data.nz.split(' ')[1]
if part not in component_data[machine_index]['part'].values:
if not auto_register:
raise Exception("unregistered component: " + part)
else:
component_data[machine_index] = pd.concat([component_data[machine_index], pd.DataFrame(
[part, 'SM8', nozzle, 0, 0], index=part_col).T], ignore_index=True)
# warning_info = 'register component ' + part + ' with default feeder type'
# warnings.warn(warning_info, UserWarning)
part_index = component_data[machine_index][component_data[machine_index]['part'] == part].index.tolist()[0]
component_data[machine_index].loc[part_index, 'points'] += 1
if (fdr := data['fdr'].split(' ')[0]) not in component_slot[part]:
component_data[machine_index].loc[part_index, 'fdn'] += 1
component_slot[part].add(fdr)
for idx, data in part_data.iterrows():
if data.part in component_slot.keys():
part_data.loc[idx, 'fdn'] = part_data.loc[idx, 'fdn'] - len(component_slot[data.part])
assert part_data.loc[idx, 'fdn'] >= 0
for idx, data in part_data.iterrows():
for machine_index in range(machine_num):
if data.part not in component_data[machine_index].part.values:
continue
part_index = component_data[machine_index][component_data[machine_index].part == data.part].index.tolist()[
0]
if component_data[machine_index].loc[part_index].nz != data.nz:
warning_info = 'the nozzle type of component ' + data.part + ' is not consistent with the pcb data'
warnings.warn(warning_info, UserWarning)
part_index = component_data[component_data['part'] == part].index.tolist()[0]
part_feeder_assign[part].add(slot)
component_data.loc[part_index, 'points'] += 1
if nozzle != 'A' and component_data.loc[part_index, 'nz'] != nozzle:
warning_info = 'the nozzle type of component ' + part + ' is not consistent with the pcb data'
warnings.warn(warning_info, UserWarning)
# drop entries with zero placement points
component_data = component_data[component_data['points'] != 0].reset_index(drop=True)
for idx, data in component_data.iterrows():
if data['fdr'][0:3] == 'SME': # electric and pneumatic feeders share the same parameters
component_data.at[idx, 'fdr'] = data['fdr'][0:2] + data['fdr'][3:]
if data.fdn == 0:
continue
if data.part in component_data[0].part.values:
part_index = component_data[0][component_data[0].part == data.part].index.tolist()[0]
component_data[0].loc[part_index, 'fdn'] += data.fdn
else:
component_data[0] = pd.concat([component_data[0], pd.DataFrame(data).T], ignore_index=True)
for machine_index in range(machine_num):
for idx, data in component_data[machine_index].iterrows():
if data['fdr'][0:3] == 'SME': # electric and pneumatic feeders share the same parameters
component_data[machine_index].at[idx, 'fdr'] = data['fdr'][0:2] + data['fdr'][3:]
# pcb_data[machine_index].sort_values(by="x", ascending=False, inplace=True)
# pcb_data[machine_index].reset_index(inplace=True)
# read the feeder base data
feeder_data = pd.DataFrame(columns=['slot', 'part', 'arg']) # arg indicates whether the feeder is pre-assigned, not the assigned quantity
if load_feeder_data:
for _, data in pcb_data.iterrows():
slot, part = data['fdr'].split(' ')
if slot[0] != 'F' and slot[0] != 'R':
continue
slot = int(slot[1:]) if slot[0] == 'F' else int(slot[1:]) + max_slot_index // 2
feeder_data = pd.concat([feeder_data, pd.DataFrame([slot, part, 1]).T])
feeder_data = defaultdict(pd.DataFrame)
if load_feeder:
for machine_index in range(machine_num):
feeder_data[machine_index] = pd.DataFrame(columns=['slot', 'part', 'arg']) # arg indicates whether the feeder is pre-assigned, not the assigned quantity
for _, data in pcb_data[machine_index].iterrows():
slot, part = data['fdr'].split(' ')
if slot[0] != 'F' and slot[0] != 'R':
continue
slot = int(slot[1:]) if slot[0] == 'F' else int(slot[1:]) + max_slot_index // 2
feeder_data[machine_index] = pd.concat([feeder_data[machine_index], pd.DataFrame([slot, part, 1]).T])
feeder_data.drop_duplicates(subset='slot', inplace=True, ignore_index=True)
# randomly remove some of the installed feeders
if load_feeder_data == 2:
feeder_data[machine_index].drop_duplicates(subset='slot', inplace=True, ignore_index=True)
# randomly remove some of the installed feeders
drop_index = random.sample(list(range(len(feeder_data[machine_index]))), len(feeder_data[machine_index]) // 2)
feeder_data.drop(index=drop_index, inplace=True)
feeder_data[machine_index].drop(index=drop_index, inplace=True)
feeder_data.sort_values(by='slot', ascending=True, inplace=True, ignore_index=True)
feeder_data[machine_index].sort_values(by='slot', ascending=True, inplace=True, ignore_index=True)
pcb_data = pcb_data.sort_values(by="x", ascending=False)
return pcb_data, component_data, feeder_data
def merge_data(partial_pcb_data, partial_component_data):
assert len(partial_pcb_data) == len(partial_component_data)
machine_num = len(partial_pcb_data)
pcb_data, component_data = copy.deepcopy(partial_pcb_data[0]), copy.deepcopy(partial_component_data[0])
for machine_index in range(1, machine_num):
pcb_data = pd.concat([pcb_data, partial_pcb_data[machine_index]], ignore_index=True)
for _, data in partial_component_data[machine_index].iterrows():
if data.part in component_data.part.values:
part_index = component_data[component_data.part == data.part].index.tolist()[0]
component_data.loc[part_index, 'points'] += data.points
component_data.loc[part_index, 'fdn'] += data.fdn
else:
component_data = pd.concat([component_data, pd.DataFrame(data).T], ignore_index=True)
component_data = component_data[component_data['points'] != 0].reset_index(drop=True)
return pcb_data, component_data
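Sketch of the new per-machine loading flow ('example.txt' is a placeholder name; load_data prefixes it with 'data/'):

partial_pcb_data, partial_component_data, feeder_data = load_data('example.txt', load_feeder=True)
machine_num = len(partial_pcb_data)  # results are now keyed by machine index
pcb_data, component_data = merge_data(partial_pcb_data, partial_component_data)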

View File

@ -1,3 +1,6 @@
import copy
import random
from generator import *
from base_optimizer.optimizer_interface import *
@@ -34,117 +37,172 @@ class LSTMNet(torch.nn.Module):
class Estimator:
def __init__(self, task_block_weight=None):
def __init__(self):
self.data_mgr = DataMgr()
def training(self, params):
pass
def testing(self, params):
pass
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
class NeuralEstimator(Estimator):
def __init__(self):
super().__init__()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
self.net.load_state_dict(torch.load('model/net_model.pth'))
self.task_block_weight = task_block_weight
self.net_file = 'model/net_model.pth'
if os.path.exists(self.net_file):
self.net.load_state_dict(torch.load(self.net_file))
with open('model/lr_model.pkl', 'rb') as f:
self.lr = pickle.load(f)
def init_weights(self):
for m in self.net.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
def convert(self, pcb_data, component_data, assignment_result):
machine_num, component_num = len(assignment_result), len(component_data)
def training(self, params):
self.init_weights() # initialize the network parameters
data = data_mgr.loader('opt/' + params.train_file)
x_train = np.array(data_mgr.neural_encode(data[0][::data_mgr.get_update_round()]))
y_train = np.array(data[1][::data_mgr.get_update_round()])
component_machine_index = [0 for _ in range(component_num)]
machine_points = [[[] for _ in range(component_num)] for _ in range(machine_num)]
x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
component2idx = defaultdict(int)
for i, data in component_data.iterrows():
component2idx[data.part] = i
optimizer = torch.optim.Adam(self.net.parameters(), lr=params.lr)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
for i in range(len(pcb_data)):
part_index = component2idx[pcb_data.iat[i, 5]]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == len(machine_points[machine_index][part_index]):
component_machine_index[part_index] += 1
machine_index += 1
else:
loss_func = torch.nn.MSELoss()
for epoch in range(params.num_epochs):
pred = self.net(x_train)
loss = loss_func(pred, y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# scheduler.step()
if epoch % 100 == 0:
print('Epoch: ', epoch, ', Loss: ', loss.item())
if loss.item() < 1e-4:
break
for _, data in pcb_data.iterrows():
part_index = component2idx[data.part]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == len(machine_points[machine_index][part_index]):
component_machine_index[part_index] += 1
machine_index += 1
else:
break
machine_points[machine_index][part_index].append([data.x, data.y])
net_predict = self.net(x_train).view(-1)
pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
res = []
for machine_index in range(machine_num):
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
cp_width, cp_height = defaultdict(float), defaultdict(float)
board_right_pos, board_left_pos, board_top_pos, board_bottom_pos = None, None, None, None
pred_error = np.array([])
for t1, t2 in np.nditer([pred_time, real_time]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
for part_index in range(component_num):
if assignment_result[machine_index][part_index] == 0:
continue
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
cp_points[part_index] = assignment_result[machine_index][part_index]
cp_nozzle[part_index] = component_data.iloc[part_index]['nz']
mse = np.linalg.norm((net_predict - y_train.view(-1)).cpu().detach().numpy())
print(f'mean square error for training data result : {mse: .2f} ')
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
torch.save(self.net.state_dict(), self.net_file)
# self.net.load_state_dict(torch.load(self.net_file))
cp_right_pos, cp_left_pos = max([p[0] for p in machine_points[machine_index][part_index]]), min(
[p[0] for p in machine_points[machine_index][part_index]])
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])
cp_top_pos, cp_bottom_pos = max([p[1] for p in machine_points[machine_index][part_index]]), min(
[p[1] for p in machine_points[machine_index][part_index]])
x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
cp_width[part_index] = cp_right_pos - cp_left_pos
cp_height[part_index] = cp_top_pos - cp_bottom_pos
self.net.eval()
with torch.no_grad():
pred_time = self.net(x_test).view(-1).cpu().detach().numpy()
# x_test = x_test.cpu().detach().numpy()
if board_right_pos is None or cp_right_pos > board_right_pos:
board_right_pos = cp_right_pos
over_set = []
pred_idx, pred_error = 0, np.array([])
for t1, t2 in np.nditer([pred_time, y_test.reshape(-1)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
if board_left_pos is None or cp_left_pos < board_left_pos:
board_left_pos = cp_left_pos
if pred_error[-1] > 5:
over_set.append(pred_idx + 1)
print(f'\033[0;31;31midx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
f'gap: {pred_error[-1]: .3f}\033[0m')
# else:
# print(f'idx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, gap: {pred_error[-1]: .3f}')
if board_top_pos is None or cp_top_pos > board_top_pos:
board_top_pos = cp_top_pos
pred_idx += 1
if board_bottom_pos is None or cp_bottom_pos < board_bottom_pos:
board_bottom_pos = cp_bottom_pos
print('over:', over_set)
print('size:', len(over_set))
res.append([cp_points, cp_nozzle, cp_width, cp_height, board_right_pos - board_left_pos,
board_top_pos - board_bottom_pos])
return res
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .3f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .3f}% ')
def neural_network(self, cp_points, cp_nozzle, board_width, board_height):
mse = np.linalg.norm(pred_time - y_test.reshape(-1))
print(f'mean square error for test data result : {mse: .2f} ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
assert board_width is not None and board_height is not None
encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
return self.net(encoding)[0, 0].item()
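A hypothetical prediction call (component data invented; the board dimensions reuse the example values that appear in a comment removed elsewhere in this commit):

estimator = NeuralEstimator()
cp_points = {'R0402': 150, 'C0603': 90}           # invented point counts per component type
cp_nozzle = {'R0402': 'CN040', 'C0603': 'CN065'}  # invented component-to-nozzle mapping
# note: predict() as written moves the encoding to "cuda", so a GPU build of torch is assumed
assembly_time = estimator.predict(cp_points, cp_nozzle, board_width=237.542, board_height=223.088)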
def heuristic_reconfiguration(self, cp_points, cp_nozzle):
task_block_number, total_point_number = 0, sum(cp_points.values())
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
nozzle_points[cp_nozzle[part]] += points
nozzle_heads[cp_nozzle[part]] = 1
remaining_head = max_head_index - len(nozzle_heads)
class HeuristicEstimator(Estimator):
def __init__(self):
super().__init__()
nozzle_fraction = []
for nozzle, points in nozzle_points.items():
val = remaining_head * points / total_point_number
nozzle_heads[nozzle] += math.floor(val)
nozzle_fraction.append([nozzle, val - math.floor(val)])
self.lr = LinearRegression()
self.pickle_file = 'model/heuristic_lr_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
remaining_head = max_head_index - sum(nozzle_heads.values())
sorted(nozzle_fraction, key=lambda x: x[1])
nozzle_fraction_index = 0
while remaining_head > 0:
nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
remaining_head -= 1
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
self.lr.fit(x_fit, y_fit)
for nozzle, heads_number in nozzle_heads.items():
task_block_number = max(self.task_block_weight, math.ceil(nozzle_points[nozzle] / heads_number))
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
with open(self.pickle_file, 'wb') as f:
pickle.dump(self.lr, f)
return (t_pick + t_place) * total_point_number + task_block_number * self.task_block_weight
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_genetic(cp_points, cp_nozzle)).reshape(1, -1))
def heuristic_genetic(self, cp_points, cp_nozzle):
nozzle_points, nozzle_component_points = defaultdict(int), defaultdict(list)
@@ -158,7 +216,7 @@ class Estimator:
for idx, (part_index, points) in enumerate(cp_points.items()):
nozzle_component_points[cp_nozzle[part_index]][idx] = points
total_points = sum(cp_points.values()) # num of placement points
nl = sum(cp_points.values()) # num of placement points
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
# assignments of nozzles to heads
@@ -168,7 +226,7 @@ class Estimator:
for nozzle in nozzle_points.keys():
if nozzle_points[nozzle] == 0:
continue
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / total_points * total_heads)
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / nl * total_heads)
nozzle_heads[nozzle] += 1
total_heads = (1 + ul) * max_head_index
@@ -195,6 +253,9 @@ class Estimator:
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
# the number of pick-up operations
# (under the assumption that the number of feeders available for each comp. type equals 1)
@@ -224,18 +285,342 @@ class Estimator:
heads_placement[head][1] -= min(min_points_list)
heads_placement_points[head] -= min(min_points_list)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
return [nl, wl, ul]
return T_pp * total_points + T_tr * wl + T_nc * ul + T_pl * pl
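For reference, the removed closed-form model above maps the same [nl, wl, ul] features to time with the fixed weights T_pp, T_tr, T_nc, T_pl = 2, 5, 25, 0 defined in optimizer_common; this commit fits the weights with LinearRegression instead. A sketch of the legacy form:

def fixed_weight_estimate(nl, wl, ul, pl=0):
    # legacy fixed-coefficient estimate, shown only for comparison with the fitted model
    return T_pp * nl + T_tr * wl + T_nc * ul + T_pl * pl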
def linear_regression(self, pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
hinter=False)
class RegressionEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/params_lr_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
self.lr.fit(x_fit, y_fit)
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
with open(self.pickle_file, 'wb') as f:
pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_reconfig(cp_points, cp_nozzle)).reshape(1, -1))
def heuristic_reconfig(self, cp_points, cp_nozzle):
task_block_number, total_point_number = 0, sum(cp_points.values())
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
nozzle_points[cp_nozzle[part]] += points
nozzle_heads[cp_nozzle[part]] = 1
remaining_head = max_head_index - len(nozzle_heads)
nozzle_fraction = []
for nozzle, points in nozzle_points.items():
val = remaining_head * points / total_point_number
nozzle_heads[nozzle] += math.floor(val)
nozzle_fraction.append([nozzle, val - math.floor(val)])
remaining_head = max_head_index - sum(nozzle_heads.values())
nozzle_fraction = sorted(nozzle_fraction, key=lambda x: x[1], reverse=True)
nozzle_fraction_index = 0
while remaining_head > 0:
nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
nozzle_fraction_index = (nozzle_fraction_index + 1) % len(nozzle_fraction)
remaining_head -= 1
for nozzle, heads_number in nozzle_heads.items():
task_block_number = max(task_block_number, math.ceil(nozzle_points[nozzle] / heads_number))
return [total_point_number, task_block_number]
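A worked example of heuristic_reconfig under the largest-remainder head assignment above (all numbers invented):

est = RegressionEstimator()
cp_points = {'A': 60, 'B': 30, 'C': 10}
cp_nozzle = {'A': 'CN040', 'B': 'CN065', 'C': 'CN140'}
# with 6 heads: CN040 gets 3, CN065 gets 2, CN140 gets 1, so the task-block count is
# max(ceil(60/3), ceil(30/2), ceil(10/1)) = 20 and the returned features are [100, 20]
print(est.heuristic_reconfig(cp_points, cp_nozzle))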
class SVREstimator(Estimator):
def __init__(self):
super().__init__()
# === symbiotic organism search parameter ===
# population of meta heuristic: 20
# number of iteration: 100
self.population_size = 20
self.num_iteration = 100
self.w_quart = 1.5
# === support vector regression parameters ===
self.kernel_func = "rbf"
self.C_range = [0.1, 10]
self.gamma_range = [0.01, 0.5]
self.epsilon_range = [0.01, 0.1]
self.benefit_factor = [1, 2]
# number of folds: 5
self.num_folds = 5
self.svr_list = [SVR() for _ in range(self.num_folds + 1)]
for i in range(self.num_folds + 1):
pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
if not os.path.exists(pickle_file):
continue
with open(pickle_file, 'rb') as f:
self.svr_list[i] = pickle.load(f)
self.pbar = tqdm(total=self.num_iteration * self.num_folds * self.population_size)
self.pbar.set_description('svr training process')
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
Q1, Q3 = np.percentile(np.array(data[1]), 25), np.percentile(np.array(data[1]), 75)
indices = [i for i in range(len(data[1])) if Q1 - self.w_quart * (Q3 - Q1) <= data[1][i] <= Q3 + self.w_quart * (Q3 - Q1)]
data[0], data[1] = [data[0][i] for i in indices], [data[1][i] for i in indices]
self.svr_list = []
division = len(data[0]) // self.num_folds
for cnt in range(self.num_folds):
x_train, y_train = data[0], data[1]
x_train = [[sum(x_train[i][0].values()), x_train[i][2], x_train[i][3]] for i in range(len(data[0])) if
not cnt * division <= i < (cnt + 1) * division]
y_train = [y_train[i] for i in range(len(data[0])) if not cnt * division <= i < (cnt + 1) * division]
self.svr_list.append(self.sos_svr_training(x_train, y_train))
final_input, final_output = [], []
for cnt in range(self.num_folds):
x_valid = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0])) if
cnt * division <= i < (cnt + 1) * division]
final_input.extend([[v] for v in self.svr_list[cnt].predict(x_valid)])
final_output.extend(
[data[1][i] for i in range(len(data[0])) if cnt * division <= i < (cnt + 1) * division])
self.svr_list.append(self.sos_svr_training(final_input, final_output))
if params.save:
for i in range(self.num_folds + 1):
pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
with open(pickle_file, 'wb') as f:
pickle.dump(self.svr_list[i], f)
predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
predict_y = []
for cnt in range(self.num_folds):
predict_y.extend(self.svr_list[cnt].predict(predict_x))
stacked_input = [[np.average(predict_y[i:i + self.num_folds])] for i in range(len(predict_y) // self.num_folds)]
predict_val = self.svr_list[-1].predict(stacked_input)
pred_error = np.array([])
for t1, t2 in np.nditer([data[1], predict_val]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
def sos_svr_training(self, x_train, y_train):
population = []
for _ in range(self.population_size):
svr_param = [random.uniform(self.C_range[0], self.C_range[1]),
random.uniform(self.gamma_range[0], self.gamma_range[1]),
random.uniform(self.epsilon_range[0], self.epsilon_range[1])]
population.append(SVR(kernel=self.kernel_func, C=svr_param[0], gamma=svr_param[1], epsilon=svr_param[2]))
population_val = []
for individual in population:
population_val.append(self.svr_error(individual, x_train, y_train))
for _ in range(self.num_iteration):
best_svr = population[np.argmin(population_val)]
for i in range(self.population_size):
# === mutualism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
Mv_C, Mv_gamma, Mv_epsilon = (population[i].C + population[j].C) / 2, (
population[i].gamma + population[j].gamma) / 2, (
population[i].epsilon + population[j].epsilon) / 2
for idx, svr in zip([i, j], [population[i], population[j]]):
new_C = svr.C + random.random() * (best_svr.C - Mv_C * random.choice(self.benefit_factor))
new_gamma = svr.gamma + random.random() * (
best_svr.gamma - Mv_gamma * random.choice(self.benefit_factor))
new_epsilon = svr.epsilon + random.random() * (
best_svr.epsilon - Mv_epsilon * random.choice(self.benefit_factor))
if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[idx]:
population[idx], population_val[idx] = new_svr, new_svr_val
# === commensalism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
new_C = population[i].C + random.uniform(-1, 1) * (best_svr.C - population[j].C)
new_gamma = population[i].gamma + random.uniform(-1, 1) * (best_svr.gamma - population[j].gamma)
new_epsilon = population[i].epsilon + random.uniform(-1, 1) * (
best_svr.epsilon - population[j].epsilon)
if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[j]:
population[j], population_val[j] = new_svr, new_svr_val
# === parasitism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
new_svr = copy.deepcopy(population[j])
idx = random.randint(0, 2)
if idx == 0:
new_svr.C = random.uniform(self.C_range[0], self.C_range[1])
elif idx == 1:
new_svr.gamma = random.uniform(self.gamma_range[0], self.gamma_range[1])
else:
new_svr.epsilon = random.uniform(self.epsilon_range[0], self.epsilon_range[1])
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[j]:
population[j], population_val[j] = new_svr, new_svr_val
self.pbar.update(1)
return population[np.argmin(population_val)]
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
predict_y = []
for cnt in range(self.num_folds):
predict_y.extend(self.svr_list[cnt].predict(predict_x))
stacked_input = [[np.average(predict_y[i:i + self.num_folds])] for i in range(len(predict_y) // self.num_folds)]
predict_val = self.svr_list[-1].predict(stacked_input)
pred_error = np.array([])
for t1, t2 in np.nditer([data[1], predict_val]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
def svr_error(self, svr, x_train, y_train):
num_data = len(x_train)
num_division = len(x_train) // self.num_folds
pred_error = np.array([])
for cnt in range(self.num_folds):
x_fit = [x_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
y_fit = [y_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
svr.fit(x_fit, y_fit)
x_valid = [x_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
y_valid = [y_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
for t1, t2 in np.nditer([y_valid, svr.predict(x_valid)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
return np.average(pred_error)
def exact_assembly_time(pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
hinter=False)
placement_result, head_sequence_result = greedy_placement_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence_result)
# regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
# info.pickup_counter, info.total_points]]
# return self.lr.predict(regression_info)[0, 0]
return info.total_time
if __name__ == '__main__':
warnings.simplefilter(action='ignore', category=FutureWarning)
parser = argparse.ArgumentParser(description='network training implementation')
# parser.add_argument('--train', default=True, type=bool, help='determine whether training the network')
parser.add_argument('--save', default=True, type=bool,
help='determine whether saving the parameters of network, linear regression model, etc.')
parser.add_argument('--overwrite', default=False, type=bool,
help='determine whether overwriting the training and testing data')
parser.add_argument('--train_file', default='train_data - bp.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data - bp.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=10000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=1000, type=int, help='size of training batch')
parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
parser.add_argument('--model', default='neural-network', help='method for assembly time estimation')
params = parser.parse_args()
data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if params.overwrite:
file = {params.train_file: params.batch_size,
params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
for file_name, file_batch_size in file.items():
with open('opt/' + file_name, 'a') as f:
for _ in range(int(file_batch_size)):
mode = file_name.split('.')[0].split('_')[0]
pcb_data, component_data = data_mgr.generator(mode) # randomly generate PCB data
# data_mgr.remover() # remove the last saved data
# data_mgr.saver('data/' + file_name, pcb_data) # save new data
info = base_optimizer(1, pcb_data, component_data,
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method='feeder-scan', hinter=True)
data_mgr.recorder(f, info, pcb_data, component_data)
f.close()
estimator = NeuralEstimator()
estimator.training(params)
estimator.testing(params)
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result)
regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
info.pickup_counter, info.total_points]]
return self.lr.predict(regression_info)[0, 0]

View File

@@ -118,7 +118,9 @@ class DataMgr:
self.pre_file = None
def encode(self, cp_points: defaultdict[str], cp_nozzle: defaultdict[str], board_width, board_height):
assert len(cp_points.keys()) == len(cp_nozzle.keys())
assert len(cp_nozzle.keys()) <= self.max_component_types and len(
set(cp_nozzle.values())) <= self.max_nozzle_types
# === general info ===
total_points = sum(points for points in cp_points.values())
total_component_types, total_nozzle_types = len(cp_points.keys()), len(set(cp_nozzle.values()))
@@ -127,7 +129,7 @@ class DataMgr:
data.extend([board_width, board_height])
# === heuristic info ===
cycle, nozzle_change, anc_move, pickup = self.heuristic_estimator(cp_points, cp_nozzle)
cycle, nozzle_change, anc_move, pickup = self.heuristic_objective(cp_points, cp_nozzle)
data.extend([cycle, nozzle_change, anc_move, pickup])
# === nozzle info ===
@@ -159,7 +161,7 @@ class DataMgr:
# for _ in range(self.max_component_types - total_component_types):
# data.extend([0 for _ in range(self.max_nozzle_types)])
# === new component info ===
# === component info ===
comp_data_slice = defaultdict(list)
for idx in range(self.max_nozzle_types):
comp_data_slice[idx] = []
@@ -180,7 +182,10 @@ class DataMgr:
data.extend(comp_data_slice[idx])
return data
def heuristic_estimator(self, cp_points, cp_nozzle):
def heuristic_objective(self, cp_points, cp_nozzle):
if len(cp_points.keys()) == 0:
return 0, 0, 0, 0
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for idx, points in cp_points.items():
if points == 0:
@@ -400,8 +405,9 @@ class DataMgr:
return pcb_data, component_data
def loader(self, file_path):
train_data, time_data = [], []
cycle_data, nozzle_change_data, anc_move_data, pickup_data, movement_data, point_data = [], [], [], [], [], []
input_data, output_data = [], [] # inputs hold component point counts, nozzle info, etc.; outputs hold the assembly time
# cycle_data, nozzle_change_data, anc_move_data, pickup_data, point_data = [], [], [], [], []
# pick_move_data, place_move_data = [], []
with open(file_path, 'r') as file:
line = file.readline()
while line:
@@ -416,33 +422,56 @@ class DataMgr:
component_type, nozzle_type = items[12 + cp_idx * 3], items[13 + cp_idx * 3]
cp_points[component_type], cp_nozzle[component_type] = points, nozzle_type
# cp_width[component_type], cp_height[component_type] = float(items[15 + cp_idx * 5]), float(
# items[16 + cp_idx * 5])
# if len(set(cp_nozzle.values())) > 2 or len(set(cp_nozzle.keys())) > 3:
if len(cp_points.keys()) > 30:
if len(cp_points.keys()) > 20 or len(cp_points.keys()) < 5:
line = file.readline()
continue
cycle_data.append(float(items[1]))
nozzle_change_data.append(float(items[2]))
anc_move_data.append(float(items[3]))
pickup_data.append(float(items[4]))
movement_data.append(float(items[5]) + float(items[6]))
point_data.append(sum(pt for pt in cp_points.values()))
board_width, board_height = float(items[7]), float(items[8])
# cycle_data.append(float(items[1]))
# nozzle_change_data.append(float(items[2]))
# anc_move_data.append(float(items[3]))
# pickup_data.append(float(items[4]))
# pick_move_data.append(float(items[5]))
# place_move_data.append(float(items[6]))
# point_data.append(sum(pt for pt in cp_points.values()))
# assembly time data
time_data.append(float(items[0]))
output_data.append(float(items[0]))
train_data.append(self.encode(cp_points, cp_nozzle, float(items[7]), float(items[8])))
# train_data.append(self.encode(cp_points, cp_nozzle, float(items[7]), float(items[8])))
input_data.append([cp_points, cp_nozzle, board_width, board_height])
# train_data[-1].extend([cycle_data[-1], nozzle_change_data[-1], anc_move_data[-1], pickup_data[-1]])
line = file.readline()
return train_data, time_data, cycle_data, nozzle_change_data, anc_move_data, pickup_data, point_data
# return train_data, time_data, cycle_data, nozzle_change_data, anc_move_data, pickup_data, pick_move_data, \
# place_move_data, point_data
return [input_data, output_data]
def neural_encode(self, input_data):
train_data = []
for cp_points, cp_nozzle, board_width, board_height in input_data:
train_data.append(self.encode(cp_points, cp_nozzle, board_width, board_height))
return train_data
def get_feature(self):
return (self.max_component_types + 2) * self.max_nozzle_types + 5 + 4
# def neural_encode(self, input_data):
# train_data = []
# for cp_points, cp_nozzle, board_width, board_height in input_data:
# train_data.append(
# [len(cp_points.keys()), len(cp_nozzle.keys()), sum(cp_points.values()), board_width, board_height])
# return train_data
#
# def get_feature(self):
# return 5
def get_update_round(self):
return self.update

View File

@@ -1,6 +1,6 @@
# implementation of "An integrated allocation method for the PCB assembly line balancing problem with nozzle changes"
from base_optimizer.optimizer_common import *
from optimizer_hyperheuristic import *
from lineopt_hyperheuristic import *
def selective_initialization(component_points, component_feeders, population_size, machine_number):
@@ -160,8 +160,7 @@ def cal_individual_val(component_points, component_nozzle, machine_number, indiv
if points == 0:
continue
cp_points[part_index], cp_nozzle[part_index] = points, component_nozzle[part_index]
# objective_val = max(objective_val, estimator.neural_network(cp_points, cp_nozzle, 237.542, 223.088))
objective_val = max(objective_val, estimator.heuristic_genetic(cp_points, cp_nozzle))
objective_val = max(objective_val, estimator.predict(cp_points, cp_nozzle))
return objective_val, machine_component_points
@@ -188,19 +187,18 @@ def individual_convert(component_points, individual):
def line_optimizer_genetic(component_data, machine_number):
# basic parameter
# crossover rate & mutation rate: 80% & 10%cizh
# crossover rate & mutation rate: 80% & 10%
# population size: 200
# the number of generation: 500
crossover_rate, mutation_rate = 0.8, 0.1
population_size, n_generations = 200, 500
estimator = Estimator()
estimator = HeuristicEstimator()
# the number of placement points, the number of available feeders, and nozzle type of component respectively
cp_points, cp_feeders, cp_nozzle = defaultdict(int), defaultdict(int), defaultdict(int)
for part_index, data in component_data.iterrows():
cp_points[part_index] += data['points']
cp_feeders[part_index] = data['feeder-limit']
cp_nozzle[part_index] = data['nz']
cp_points[part_index] += data.points
cp_feeders[part_index], cp_nozzle[part_index] = data.fdn, data.nz
# population initialization
population = selective_initialization(sorted(cp_points.items(), key=lambda x: x[0]), cp_feeders, population_size,
@@ -261,7 +259,7 @@ def line_optimizer_genetic(component_data, machine_number):
print('final value: ', val)
# available feeder check
for part_index, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
feeder_limit = data.fdn
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index]:
feeder_limit -= 1

View File

@@ -5,7 +5,7 @@ import random
import numpy as np
from base_optimizer.optimizer_common import *
from base_optimizer.optimizer_feederpriority import *
from base_optimizer.smopt_feederpriority import *
from base_optimizer.result_analysis import *
@@ -174,11 +174,11 @@ def assembly_time_estimator(assignment_points, arranged_feeders, component_data)
for idx, points in enumerate(assignment_points):
if points == 0:
continue
feeder_limit = int(component_data.iloc[idx]['feeder-limit'])
feeder_limit = int(component_data.iloc[idx].fdn)
reminder_points = points % feeder_limit
for _ in range(feeder_limit):
cp_info.append(
[idx, points // feeder_limit + (1 if reminder_points > 0 else 0), component_data.iloc[idx]['nz']])
[idx, points // feeder_limit + (1 if reminder_points > 0 else 0), component_data.iloc[idx].nz])
reminder_points -= 1
cp_info.sort(key=lambda x: -x[1])
@@ -217,7 +217,7 @@ def line_optimizer_heuristic(component_data, machine_number):
total_points += data['points']
# first step: generate the initial solution with equalized workload
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
assignment_result = [[0 for _ in range(component_number)] for _ in range(machine_number)]
assignment_points = [0 for _ in range(machine_number)]
average_points = total_points // machine_number
@@ -232,8 +232,8 @@ def line_optimizer_heuristic(component_data, machine_number):
# define the machine that assigning placement points (considering the feeder limitation)
for machine_index in np.argsort(assignment_points):
if len(machine_set) >= component_data.iloc[part_index]['points'] or len(machine_set) >= \
component_data.iloc[part_index]['feeder-limit']:
if len(machine_set) >= component_data.iloc[part_index].points or len(machine_set) >= \
component_data.iloc[part_index].fdn:
break
machine_set.append(machine_index)
@@ -295,10 +295,10 @@ def line_optimizer_heuristic(component_data, machine_number):
# second step: estimate the assembly time for each machine
arranged_feeders = defaultdict(list)
for machine_index in range(machine_number):
arranged_feeders[machine_index] = [0 for _ in range(len(component_data))]
arranged_feeders[machine_index] = [0 for _ in range(component_number)]
for part_index in range(len(component_data)):
feeder_limit = component_data.iloc[part_index]['feeder-limit']  # total available feeders
for part_index in range(component_number):
feeder_limit = component_data.iloc[part_index].fdn  # total available feeders
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index] == 0:
continue
@@ -307,8 +307,8 @@ def line_optimizer_heuristic(component_data, machine_number):
arranged_feeders[machine_index][part_index] = 1
assert feeder_limit >= 0
for part_index in range(len(component_data)):
total_feeder_limit = component_data.iloc[part_index]['feeder-limit'] - sum(
for part_index in range(component_number):
total_feeder_limit = component_data.iloc[part_index].fdn - sum(
[arranged_feeders[machine_index][part_index] for machine_index in range(machine_number)])
while total_feeder_limit > 0:
max_ratio, max_ratio_machine = None, -1
@@ -368,7 +368,7 @@ def line_optimizer_heuristic(component_data, machine_number):
supply_machine_list.sort(key=lambda mi: -machine_reallocate_points[mi])
for supply_mi in supply_machine_list:
for part_index in range(len(component_data)):
for part_index in range(component_number):
if assignment_result[supply_mi][part_index] <= 0:
continue
@@ -380,7 +380,7 @@ def line_optimizer_heuristic(component_data, machine_number):
tmp_reallocate_result[supply_mi] -= reallocate_points
tmp_reallocate_result[demand_mi] += reallocate_points
if sum(1 for pt in tmp_reallocate_result if pt > 0) > component_data.iloc[part_index]['feeder-limit']:
if sum(1 for pt in tmp_reallocate_result if pt > 0) > component_data.iloc[part_index].fdn:
continue
assignment_result[supply_mi][part_index] -= reallocate_points
@@ -393,7 +393,7 @@ def line_optimizer_heuristic(component_data, machine_number):
# 2. balance the number of placements of the different type between different machines.
cp_info = []
for part_index in range(len(component_data)):
for part_index in range(component_number):
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index] == 0:
continue

lineopt_hyperheuristic.py Normal file (429 lines)
View File

@@ -0,0 +1,429 @@
import os
import pickle
import random
import numpy as np
import pandas as pd
import torch.nn
from base_optimizer.optimizer_interface import *
from generator import *
from estimator import *
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class Heuristic:
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
return -1
class LeastPoints(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_points = []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_points.append(sum([cp_points[cp_idx] for cp_idx in cp_assign[machine_idx]]))
return machine_assign[np.argmin(machine_points)]
class LeastNzTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_nozzle = []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nozzle.append([cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]])
index = np.argmin(
[len(set(nozzle)) + 1e-5 * sum(cp_points[c] for c in cp_assign[machine_idx]) for machine_idx, nozzle in
enumerate(machine_nozzle)])
return machine_assign[index]
class LeastCpTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_types = []
for machine_idx in machine_assign:
machine_types.append(
len(cp_assign[machine_idx]) + 1e-5 * sum(cp_points[cp] for cp in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_types)]
class LeastCpNzRatio(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_nz_type, machine_cp_type = [], []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nz_type.append(set(cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]))
machine_cp_type.append(len(cp_assign[machine_idx]))
min_idx = np.argmin([(machine_cp_type[idx] + 1e-5 * sum(cp_points[c] for c in cp_assign[idx])) / (
len(machine_nz_type[idx]) + 1e-5) for idx in range(len(machine_assign))])
return machine_assign[min_idx]
def nozzle_assignment(cp_points, cp_nozzle, cp_assign):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for cp_idx in cp_assign:
nozzle_points[cp_nozzle[cp_idx]] += cp_points[cp_idx]
nozzle_heads[cp_nozzle[cp_idx]] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
return nozzle_heads, nozzle_points
class LeastCycle(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_cycle = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
machine_cycle.append(
max(nozzle_points[nozzle] / head for nozzle, head in nozzle_heads.items()) + 1e-5 * sum(
cp_points[c] for c in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_cycle)]
class LeastNzChange(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_nozzle_change = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
if len(assign_component) == 0:
return machine_idx
heads_points = []
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
for nozzle, head in nozzle_heads.items():
for _ in range(head):
heads_points.append(nozzle_points[nozzle] / nozzle_heads[nozzle])
machine_nozzle_change.append(np.std(heads_points) + 1e-5 * sum(cp_points[c] for c in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_nozzle_change)]
class LeastPickup(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_pick_up = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for cp_idx in sorted(assign_component, key=lambda x: cp_points[x], reverse=True):
nozzle, points = cp_nozzle[cp_idx], cp_points[cp_idx]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], points)
nozzle_counter[nozzle] += 1
machine_pick_up.append(sum(points for points in level_points.values()) + 1e-5 * sum(
cp_points[idx] for idx in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_pick_up)]
def generate_pattern(heuristic_map, cp_points):
"""
Generates a random pattern.
:return: The generated pattern string.
"""
return "".join([random.choice(list(heuristic_map.keys())) for _ in range(random.randrange(1, len(cp_points)))])
def crossover(cp_points, parent1, parent2):
"""
Attempt to perform crossover between two chromosomes.
:param parent1: The first parent.
:param parent2: The second parent.
:return: The two individuals after crossover has been performed.
"""
point1, point2 = random.randrange(len(parent1)), random.randrange(len(parent2))
substr1, substr2 = parent1[point1:], parent2[point2:]
offspring1, offspring2 = "".join((parent1[:point1], substr2)), "".join((parent2[:point2], substr1))
return offspring1[:len(cp_points)], offspring2[:len(cp_points)]
def mutation(heuristic_map, cp_points, individual):
"""
Attempts to mutate the individual by replacing a random heuristic in the chromosome by a generated pattern.
:param individual: The individual to mutate.
:return: The mutated individual.
"""
pattern = list(individual)
mutation_point = random.randrange(len(pattern))
pattern[mutation_point] = generate_pattern(heuristic_map, cp_points)
return ''.join(pattern)[:len(cp_points)]
def population_initialization(population_size, heuristic_map, cp_points):
return [generate_pattern(heuristic_map, cp_points) for _ in range(population_size)]
def convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, component_list, individual,
machine_number):
cp_assign = [[] for _ in range(machine_number)]
machine_all, machine_assign = list(range(machine_number)), defaultdict(set)
for idx, div_cp_idx in enumerate(component_list):
h = individual[idx % len(individual)]
cp_idx = cp_index[div_cp_idx]
if len(machine_assign[cp_idx]) < cp_feeders[cp_idx]:
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, machine_all)
else:
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, list(machine_assign[cp_idx]))
cp_assign[machine_idx].append(div_cp_idx)
machine_assign[cp_idx].add(machine_idx)
return cp_assign
def cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width, board_height,
component_list, individual, machine_number, estimator):
machine_cp_assign = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, component_list,
individual, machine_number)
component_number = len(cp_feeders)
machine_cp_points = [[0 for _ in range(component_number)] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for idx in machine_cp_assign[machine_idx]:
machine_cp_points[machine_idx][cp_index[idx]] += cp_points[idx]
machine_cp_feeders = [[0 for _ in range(component_number)] for _ in range(machine_number)]
for cp_idx in range(component_number):
feeder_nums = cp_feeders[cp_idx]
for machine_idx in range(machine_number):
if machine_cp_points[machine_idx][cp_idx]:
machine_cp_feeders[machine_idx][cp_idx] = 1
feeder_nums -= 1
while feeder_nums > 0:
assign_machine = None
for machine_idx in range(machine_number):
if machine_cp_points[machine_idx][cp_idx] == 0:
continue
if assign_machine is None:
assign_machine = machine_idx
continue
if machine_cp_points[assign_machine][cp_idx] / machine_cp_feeders[assign_machine][cp_idx] \
< machine_cp_points[machine_idx][cp_idx] / machine_cp_feeders[machine_idx][cp_idx]:
assign_machine = machine_idx
machine_cp_feeders[assign_machine][cp_idx] += 1
feeder_nums -= 1
nozzle_type = defaultdict(str)
for idx, cp_idx in cp_index.items():
nozzle_type[cp_idx] = cp_nozzle[idx]
objective_val = []
for machine_idx in range(machine_number):
div_cp_points, div_cp_nozzle = defaultdict(int), defaultdict(str)
idx = 0
for cp_idx in range(component_number):
total_points = machine_cp_points[machine_idx][cp_idx]
if total_points == 0:
continue
div_index = 0
div_points = [total_points // machine_cp_feeders[machine_idx][cp_idx] for _ in
range(machine_cp_feeders[machine_idx][cp_idx])]
while sum(div_points) < total_points:
div_points[div_index] += 1
div_index += 1
for points in div_points:
div_cp_points[idx] = points
div_cp_nozzle[idx] = nozzle_type[cp_idx]
idx += 1
objective_val.append(estimator.predict(div_cp_points, div_cp_nozzle, board_width, board_height))
return objective_val
def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
heuristic_map = {
'p': LeastPoints,
'n': LeastNzTypes,
'c': LeastCpTypes,
'r': LeastCpNzRatio,
'k': LeastCycle,
'g': LeastNzChange,
'u': LeastPickup,
}
# genetic-based hyper-heuristic
crossover_rate, mutation_rate = 0.6, 0.1
population_size, n_generations = 20, 50
n_iterations = 10
estimator = NeuralEstimator()
best_val = None
best_heuristic_list = None
best_component_list = None
cp_feeders, cp_nozzle = defaultdict(int), defaultdict(str)
cp_points, cp_index = defaultdict(int), defaultdict(int)
division_component_data = pd.DataFrame(columns=component_data.columns)
division_points = min(component_data['points'])
idx = 0
for cp_idx, data in component_data.iterrows():
cp_feeders[cp_idx] = data['fdn']
division_data = copy.deepcopy(data)
feeder_limit, total_points = division_data.fdn, division_data.points
feeder_limit = max(total_points // division_points * 3, feeder_limit)
surplus_points = total_points % feeder_limit
for _ in range(feeder_limit):
division_data.fdn, division_data.points = 1, math.floor(total_points / feeder_limit)
if surplus_points:
division_data.points += 1
surplus_points -= 1
cp_points[idx], cp_nozzle[idx] = division_data.points, division_data.nz
cp_index[idx] = cp_idx
idx += 1
division_component_data = pd.concat([division_component_data, pd.DataFrame(division_data).T])
division_component_data = division_component_data.reset_index()
component_list = [idx for idx, data in division_component_data.iterrows() if data.points > 0]
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
with tqdm(total=n_generations * n_iterations) as pbar:
pbar.set_description('hyper-heuristic algorithm process for PCB assembly line balance')
for _ in range(n_iterations):
random.shuffle(component_list)
new_population = []
population = population_initialization(population_size, heuristic_map, cp_points)
# calculate fitness value
pop_val = []
for individual in population:
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, individual, machine_number, estimator)
pop_val.append(max(val))
for _ in range(n_generations):
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
population += new_population
for individual in new_population:
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, individual, machine_number, estimator)
pop_val.append(max(val))
# min-max convert
max_val = max(pop_val)
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(sel_pop_val)
while True:
index2 = roulette_wheel_selection(sel_pop_val)
if index1 != index2:
break
offspring1, offspring2 = crossover(cp_points, population[index1], population[index2])
if np.random.random() < mutation_rate:
offspring1 = mutation(heuristic_map, cp_points, offspring1)
if np.random.random() < mutation_rate:
offspring2 = mutation(heuristic_map, cp_points, offspring2)
new_population.append(offspring1)
new_population.append(offspring2)
if len(new_population) >= population_size * crossover_rate:
break
pbar.update(1)
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, population[0], machine_number, estimator)
machine_assign = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
component_list, population[0], machine_number)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for idx in machine_assign[machine_idx]:
assignment_result[machine_idx][cp_index[idx]] += cp_points[idx]
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data,
assignment_result)
max_machine_idx = np.argmax(val)
val = exact_assembly_time(partial_pcb_data[max_machine_idx], partial_component_data[max_machine_idx])
if best_val is None or val < best_val:
for machine_idx in range(machine_number):
if machine_idx == max_machine_idx:
continue
val = max(val,
exact_assembly_time(partial_pcb_data[machine_idx], partial_component_data[machine_idx]))
if best_val is None or val < best_val:
best_val = val
best_heuristic_list = population[0]
best_component_list = component_list.copy()
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width, board_height,
best_component_list, best_heuristic_list, machine_number, estimator)
print(val)
machine_cp_points = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
best_component_list, best_heuristic_list, machine_number)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for idx in machine_cp_points[machine_idx]:
assignment_result[machine_idx][cp_index[idx]] += cp_points[idx]
return assignment_result

lineopt_model.py Normal file (173 lines)
View File

@@ -0,0 +1,173 @@
import copy
import pandas as pd
from base_optimizer.optimizer_common import *
from base_optimizer.result_analysis import *
def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
mdl = Model('pcb assembly line optimizer')
mdl.setParam('Seed', 0)
mdl.setParam('OutputFlag', hinter) # set whether output the debug information
mdl.setParam('TimeLimit', 600)
nozzle_type, component_type = [], []
for _, data in component_data.iterrows():
if data.nz not in nozzle_type:
nozzle_type.append(data.nz)
component_type.append(data.part)
average_pos = 0
for _, data in pcb_data.iterrows():
average_pos += data.x
slot_start = int(round((average_pos / len(pcb_data) + stopper_pos[0] - slotf1_pos[0]) / slot_interval)) + 1
ratio = 1
J = len(nozzle_type)
N = 10000
M = machine_num
H = max_head_index
I = len(component_data)
S = min(len(component_data) * ratio, 60)
K = len(pcb_data)
CompOfNozzle = [[0 for _ in range(J)] for _ in range(I)] # Compatibility
component_point = [0 for _ in range(I)]
for idx, data in component_data.iterrows():
nozzle = component_data.iloc[idx].nz
CompOfNozzle[idx][nozzle_type.index(nozzle)] = 1
component_point[idx] = data.points
# objective related
g = mdl.addVars(list_range(K), list_range(M), vtype=GRB.BINARY)
d = mdl.addVars(list_range(K - 1), list_range(H), list_range(M), vtype=GRB.CONTINUOUS)
# u = mdl.addVars(list_range(K), list_range(M), vtype=GRB.INTEGER)
d_plus = mdl.addVars(list_range(J), list_range(H), list_range(K - 1), list_range(M), vtype=GRB.CONTINUOUS)
d_minus = mdl.addVars(list_range(J), list_range(H), list_range(K - 1), list_range(M), vtype=GRB.CONTINUOUS)
e = mdl.addVars(list_range(-(H - 1) * ratio, S), list_range(K), list_range(M), vtype=GRB.BINARY)
f = mdl.addVars(list_range(S), list_range(I), list_range(M), vtype=GRB.BINARY, name='')
x = mdl.addVars(list_range(I), list_range(S), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
n = mdl.addVars(list_range(H), list_range(M), vtype=GRB.CONTINUOUS)
obj = mdl.addVar(lb=0, ub=N, vtype=GRB.CONTINUOUS)
mdl.addConstrs(g[k, m] >= g[k + 1, m] for k in range(K - 1) for m in range(M))
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for i in range(I) for s in range(S)) <= g[k, m] for k in range(K) for h in range(H)
for m in range(M))
# nozzle no more than 1 for head h and cycle k
mdl.addConstrs(
quicksum(CompOfNozzle[i][j] * x[i, s, k, h, m] for i in range(I) for s in range(S) for j in range(J)) <= 1 for k
in range(K) for h in range(H) for m in range(M))
# nozzle available number constraint
mdl.addConstrs(
quicksum(CompOfNozzle[i][j] * x[i, s, k, h, m] for i in range(I) for s in range(S) for h in range(H)) <= H for k
in range(K) for j in range(J) for m in range(M))
# work completion
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for s in range(S) for k in range(K) for h in range(H) for m in range(M)) ==
component_point[i] for i in range(I))
# nozzle change
mdl.addConstrs(quicksum(CompOfNozzle[i][j] * x[i, s, k, h, m] for i in range(I) for s in range(S)) - quicksum(
CompOfNozzle[i][j] * x[i, s, k + 1, h, m] for i in range(I) for s in range(S)) == d_plus[j, h, k, m] - d_minus[
j, h, k, m] for k in range(K - 1) for j in range(J) for h in range(H) for m in range(M))
mdl.addConstrs(2 * d[k, h, m] == quicksum(d_plus[j, h, k, m] for j in range(J)) + quicksum(
d_minus[j, h, k, m] for j in range(J)) for k in range(K - 1) for h in range(H) for m in range(M))
mdl.addConstrs(n[h, m] == quicksum(d[k, h, m] for k in range(K - 1)) - 0.5 for h in range(H) for m in range(M))
# simultaneous pick
for s in range(-(H - 1) * ratio, S):
rng = list(range(max(0, -math.floor(s / ratio)), min(H, math.ceil((S - s) / ratio))))
for k in range(K):
mdl.addConstrs(
quicksum(x[i, s + h * ratio, k, h, m] for h in rng for i in range(I)) <= N * e[s, k, m] for m in
range(M))
mdl.addConstrs(
quicksum(x[i, s + h * ratio, k, h, m] for h in rng for i in range(I)) >= e[s, k, m] for m in range(M))
# pickup movement
# mdl.addConstrs(u[k, m] >= s1 * e[s1, k, m] - s2 * e[s2, k, m] for s1 in range(-(H - 1) * ratio, S) for s2 in
# range(-(H - 1) * ratio, S) for k in range(K))
# feeder related
mdl.addConstrs(quicksum(f[s, i, m] for s in range(S) for m in range(M)) <= 1 for i in range(I))
mdl.addConstrs(quicksum(f[s, i, m] for i in range(I)) <= 1 for s in range(S) for m in range(M))
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for h in range(H) for k in range(K)) >= f[s, i, m] for i in range(I) for s in range(S)
for m in range(M))
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for h in range(H) for k in range(K)) <= N * f[s, i, m] for i in range(I) for s in
range(S) for m in range(M))
mdl.addConstrs(
quicksum(f[s, i, m] for i in range(I)) >= quicksum(f[s + 1, i, m] for i in range(I)) for s in range(S - 1) for m
in range(M))
# objective
T_cy, T_nz, T_pu, T_pl = 2, 3, 1, 1
mdl.addConstrs(obj >= T_cy * quicksum(g[k, m] for k in range(K)) + T_nz * quicksum(
d[k, h, m] for h in range(H) for k in range(K - 1)) + T_pu * quicksum(
e[s, k, m] for s in range(-(H - 1) * ratio, S) for k in range(K)) + T_pl * quicksum(
x[i, s, k, h, m] for i in range(I) for s in range(S) for k in range(K) for h in range(H)) for m in range(M))
mdl.setObjective(obj, GRB.MINIMIZE)
mdl.optimize()
pcb_part_indices = defaultdict(list)
for idx, data in pcb_data.iterrows():
pcb_part_indices[data.part].append(idx)
assembly_info = []
for m in range(M):
partial_component_data, partial_pcb_data = copy.deepcopy(component_data), pd.DataFrame(columns=pcb_data.columns)
partial_component_data['points'] = 0
part_index = defaultdict(int)
for idx, data in component_data.iterrows():
part_index[data.part] = idx
component_result, cycle_result, feeder_slot_result = [], [], []
for k in range(K):
if abs(g[k, m].x) < 1e-3:
continue
component_result.append([-1 for _ in range(H)])
cycle_result.append(1)
feeder_slot_result.append([-1 for _ in range(H)])
for h in range(H):
for i in range(I):
for s in range(S):
if abs(x[i, s, k, h, m].x) < 1e-3:
continue
if component_result[-1][h] != -1:
assert False, 'a head should receive at most one component per cycle'
component_result[-1][h] = i
feeder_slot_result[-1][h] = slot_start + s * 2
idx = pcb_part_indices[component_data.iloc[i].part][0]
partial_pcb_data = pd.concat([partial_pcb_data, pd.DataFrame(pcb_data.iloc[idx]).T])
pcb_part_indices[component_data.iloc[i].part].pop(0)
partial_component_data.loc[i, 'points'] += 1
print(component_result)
print(cycle_result)
print(feeder_slot_result)
placement_result, head_sequence = greedy_placement_route_generation(partial_component_data, partial_pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
print('----- Placement machine ' + str(m + 1) + ' ----- ')
info = placement_info_evaluation(partial_component_data, partial_pcb_data, component_result, cycle_result,
feeder_slot_result, placement_result, head_sequence, hinter=False)
optimization_assign_result(partial_component_data, partial_pcb_data, component_result, cycle_result,
feeder_slot_result, nozzle_hinter=True, component_hinter=True, feeder_hinter=True)
info.print()
assembly_info.append(info)
print('------------------------------ ')
return assembly_info

View File

@@ -42,7 +42,7 @@ def random_component_assignment(pcb_data, component_data, machine_number, estima
if assignment_result[idx][part] > 0 or idx == machine_index:
feeder_counter += 1
if component_points[part] == 0 or feeder_counter > component_data.iloc[part]['feeder-limit']:
if component_points[part] == 0 or feeder_counter > component_data.iloc[part].fdn:
continue
# feeder limit restriction
@@ -98,7 +98,7 @@ def local_search_component_assignment(pcb_data, component_data, machine_number,
if optimal_assignment[machine][part_idx] or machine == swap_machine_idx:
feeder_available += 1
if feeder_available <= component_data.iloc[part_idx]['feeder-limit'] and swap_machine_idx != machine_idx:
if feeder_available <= component_data.iloc[part_idx].fdn and swap_machine_idx != machine_idx:
break
assert swap_machine_idx is not None
assignment[machine_idx][part_idx] -= r
@@ -212,7 +212,7 @@ def reconfig_mutation_operation(component_data, parent, machine_number):
for machine_index in range(machine_number):
if offspring[swap_machine1][swap_component_index] < swap_points or machine_index == swap_machine2:
feeder_counter += 1
if feeder_counter > component_data.iloc[swap_component_index]['feeder-limit']:
if feeder_counter > component_data.iloc[swap_component_index].fdn:
return offspring
offspring[swap_machine1][swap_component_index] -= swap_points
@@ -298,7 +298,7 @@ def evolutionary_component_assignment(pcb_data, component_data, machine_number,
def line_optimizer_reconfiguration(component_data, pcb_data, machine_number):
# === assignment of heads to modules is omitted ===
optimal_assignment, optimal_val = [], None
estimator = Estimator(task_block_weight=5) # element from list [0, 1, 2, 5, 10] task_block ~= cycle
estimator = RegressionEstimator() # element from list [0, 1, 2, 5, 10] task_block ~= cycle
# === assignment of components to heads
for i in range(5):
if i == 0:

View File

@@ -5,7 +5,6 @@ def assemblyline_optimizer_spidermonkey(pcb_data, component_data):
# maximum number of groups: 5
# number of loops: 100
# food source population: 50
# mutation rate: 0.1
# crossover rate: 0.9
# mutation rate: 0.1, crossover rate: 0.9
# computation time(s): 200
pass

View File

@@ -3,52 +3,45 @@ import random
import numpy as np
from dataloader import *
from optimizer_genetic import line_optimizer_genetic
from optimizer_heuristic import line_optimizer_heuristic
from optimizer_reconfiguration import line_optimizer_reconfiguration
from optimizer_hyperheuristic import line_optimizer_hyperheuristic
from lineopt_genetic import line_optimizer_genetic
from lineopt_heuristic import line_optimizer_heuristic
from lineopt_reconfiguration import line_optimizer_reconfiguration
from lineopt_hyperheuristic import line_optimizer_hyperheuristic
from lineopt_model import line_optimizer_model
from base_optimizer.optimizer_interface import *
def optimizer(pcb_data, component_data, line_optimizer, machine_optimizer, machine_number):
if machine_number > 1:
if line_optimizer == 'hyper-heuristic':
assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, machine_number)
elif line_optimizer == "heuristic":
assignment_result = line_optimizer_heuristic(component_data, machine_number)
elif line_optimizer == "genetic":
assignment_result = line_optimizer_genetic(component_data, machine_number)
elif line_optimizer == "reconfiguration":
assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, machine_number)
else:
raise 'line optimizer method is not existed'
else:
assignment_result = [[]]
for _, data in component_data.iterrows():
assignment_result[-1].append(data.points)
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
assembly_info = []
for machine_index in range(machine_number):
assembly_info.append(
base_optimizer(machine_index + 1, partial_pcb_data[machine_index], partial_component_data[machine_index],
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method=machine_optimizer,
hinter=True))
if line_optimizer in ('hyper-heuristic', 'heuristic', 'genetic', 'reconfiguration'):
if machine_number > 1:
if line_optimizer == 'hyper-heuristic':
assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, machine_number)
elif line_optimizer == "heuristic":
assignment_result = line_optimizer_heuristic(component_data, machine_number)
elif line_optimizer == "genetic":
assignment_result = line_optimizer_genetic(component_data, machine_number)
else:
assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, machine_number)
else:
assignment_result = [[]]
for _, data in component_data.iterrows():
assignment_result[-1].append(data.points)
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
for machine_index in range(machine_number):
total_component_types = sum(1 if pt else 0 for pt in assignment_result[machine_index])
total_placement_points = sum(assignment_result[machine_index])
total_time = assembly_info[machine_index].total_time
print(f'assembly time for machine {machine_index + 1: d}: {total_time: .3f} s, total placement: '
f'{total_placement_points}, total component types {total_component_types: d}', end='')
for part_index in range(len(assignment_result[machine_index])):
if assignment_result[machine_index][part_index]:
print(', ', part_index, end='')
print('')
for machine_index in range(machine_number):
assembly_info.append(
base_optimizer(machine_index + 1, partial_pcb_data[machine_index], partial_component_data[machine_index],
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method=machine_optimizer,
hinter=True))
elif line_optimizer == 'model':
assembly_info = line_optimizer_model(component_data, pcb_data, machine_number)
else:
raise ValueError('line optimizer method does not exist')
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
return assembly_info
@timer_wrapper
@@ -56,8 +49,10 @@ def main():
warnings.simplefilter(action='ignore', category=FutureWarning)
# parse command-line arguments
parser = argparse.ArgumentParser(description='assembly line optimizer implementation')
parser.add_argument('--mode', default=1, type=int, help='mode: 0 - directly load pcb data without optimization '
'(for data analysis); 1 - optimize pcb data')
parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
parser.add_argument('--auto_register', default=1, type=int, help='register the component according the pcb data')
parser.add_argument('--comp_register', default=1, type=int, help='register components according to the pcb data')
parser.add_argument('--machine_number', default=3, type=int, help='the number of machine in the assembly line')
parser.add_argument('--machine_optimizer', default='feeder-scan', type=str, help='optimizer for single machine')
parser.add_argument('--line_optimizer', default='hyper-heuristic', type=str, help='optimizer for PCB assembly line')
@@ -68,46 +63,58 @@
# display all rows and columns in printed results
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
if params.mode == 0:
partial_pcb_data, partial_component_data, _ = load_data(params.filename)
assembly_info = []
for machine_index in range(len(partial_pcb_data)):
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = \
convert_pcbdata_to_result(partial_pcb_data[machine_index], partial_component_data[machine_index])
print('----- Placement machine ' + str(machine_index + 1) + ' ----- ')
info = placement_info_evaluation(partial_component_data[machine_index], partial_pcb_data[machine_index],
component_result, cycle_result, feeder_slot_result, placement_result,
head_sequence)
assembly_info.append(info)
optimization_assign_result(partial_component_data[machine_index], partial_pcb_data[machine_index],
component_result, cycle_result, feeder_slot_result, nozzle_hinter=True,
component_hinter=True, feeder_hinter=True)
info.print()
print('------------------------------ ')
else:
# load PCB data
partial_pcb_data, partial_component_data, _ = load_data(params.filename)
pcb_data, component_data = merge_data(partial_pcb_data, partial_component_data)
# load PCB data
pcb_data, component_data, _ = load_data(params.filename, default_feeder_limit=params.feeder_limit,
cp_auto_register=params.auto_register, load_feeder_data=False)
assembly_info = optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer,
params.machine_number)
optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, params.machine_number)
# index_list, part_list = [5, 6, 7, 8, 9, 10, 11, 12, 13], []
# for idx in index_list:
# part_list.append(component_data.iloc[idx].part)
# pcb_data = pcb_data[pcb_data['part'].isin(part_list)].reset_index(drop=True)
# component_data = component_data.iloc[index_list].reset_index(drop=True)
#
# from lineopt_hyperheuristic import DataMgr, Net
# data_mgr = DataMgr()
#
# cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
# for _, data in component_data.iterrows():
# cp_points[data.part], cp_nozzle[data.part] = data.points, data.nz
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
#
# net.load_state_dict(torch.load('model/net_model.pth'))
# board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
# encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
# encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
# print(f'net pred time: {net(encoding)[0, 0].item():.3f}')
# index_list, part_list = [1, 4, 8, 9, 12, 13, 14, 18, 20, 22, 23, 25, 33, 35, 38, 39, 40], []
# for idx in index_list:
# part_list.append(component_data.iloc[idx].part)
# pcb_data = pcb_data[pcb_data['part'].isin(part_list)].reset_index(drop=True)
# component_data = component_data.iloc[index_list].reset_index(drop=True)
# optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, 1)
#
# from optimizer_hyperheuristic import DataMgr, Net
# data_mgr = DataMgr()
for machine_idx, info in enumerate(assembly_info):
print(f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
# cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
# for _, data in component_data.iterrows():
# cp_points[data.part], cp_nozzle[data.part] = data.points, data.nz
# idx = 1832
# data = data_mgr.loader(file_name)
# encoding = np.array(data[0][idx])
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
#
# net.load_state_dict(torch.load('model/net_model.pth'))
# board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
# encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
# encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
# print(f'net pred time: {net(encoding)[0, 0].item():.3f}')
# with open('model/lr_model.pkl', 'rb') as f:
# lr = pickle.load(f)
#
# print('lr model train data: ', np.array(data[2:]).T[idx].reshape(1, -1))
# print('lr model pred time: ', lr.predict(np.array(data[2:]).T[idx].reshape(1, -1)))
# print('real time: ', data[-1][idx] * 3600 / data[1][idx])
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
if __name__ == '__main__':

View File

@@ -1,446 +0,0 @@
import os
import pickle
import random
import numpy as np
import pandas as pd
import torch.nn
from base_optimizer.optimizer_interface import *
from generator import *
from estimator import *
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class Heuristic:
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
return -1
class LeastPoints(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_points = []
for machine_idx in range(len(cp_assign)):
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_points.append(sum([cp_points[cp_idx] for cp_idx in cp_assign[machine_idx]]))
return np.argmin(machine_points)
class LeastNzTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_nozzle = []
for machine_idx in range(len(cp_assign)):
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nozzle.append([cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]])
return np.argmin([len(set(nozzle)) for nozzle in machine_nozzle])
class LeastCpTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
return np.argmin([len(cp) for cp in cp_assign])
class LeastCpNzRatio(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_nz_type, machine_cp_type = [], []
for machine_idx in range(len(cp_assign)):
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nz_type.append([cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]])
machine_cp_type.append(cp_assign[machine_idx])
return np.argmin(
[len(machine_cp_type[machine_idx]) / (len(machine_nz_type[machine_idx]) + 1e-5) for machine_idx in
range(len(cp_assign))])
def nozzle_assignment(cp_points, cp_nozzle, cp_assign):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for cp_idx in cp_assign:
nozzle_points[cp_nozzle[cp_idx]] += cp_points[cp_idx]
nozzle_heads[cp_nozzle[cp_idx]] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
return nozzle_heads, nozzle_points
class LeastCycle(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_cycle = []
for machine_idx, assign_component in enumerate(cp_assign):
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
machine_cycle.append(max(nozzle_points[nozzle] / head for nozzle, head in nozzle_heads.items()))
return np.argmin(machine_cycle)
class LeastNzChange(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_nozzle_change = []
for machine_idx, assign_component in enumerate(cp_assign):
if len(assign_component) == 0:
return machine_idx
heads_points = []
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
for nozzle, head in nozzle_heads.items():
for _ in range(head):
heads_points.append(nozzle_points[nozzle] / nozzle_heads[nozzle])
machine_nozzle_change.append(np.std(heads_points))
return np.argmin(machine_nozzle_change)
class LeastPickup(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_pick_up = []
for machine_idx, assign_component in enumerate(cp_assign):
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for cp_idx in sorted(assign_component, key=lambda x: cp_points[x], reverse=True):
nozzle, points = cp_nozzle[cp_idx], cp_points[cp_idx]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], points)
nozzle_counter[nozzle] += 1
machine_pick_up.append(sum(points for points in level_points.values()))
return np.argmin(machine_pick_up)
def generate_pattern(heuristic_map, cp_points):
"""
Generates a random pattern.
:return: The generated pattern string.
"""
return "".join([random.choice(list(heuristic_map.keys())) for _ in range(random.randrange(1, len(cp_points)))])
def crossover(parent1, parent2):
"""
Attempt to perform crossover between two chromosomes.
:param parent1: The first parent.
:param parent2: The second parent.
:return: The two individuals after crossover has been performed.
"""
point1, point2 = random.randrange(len(parent1)), random.randrange(len(parent2))
substr1, substr2 = parent1[point1:], parent2[point2:]
offspring1, offspring2 = "".join((parent1[:point1], substr2)), "".join((parent2[:point2], substr1))
return offspring1, offspring2
def mutation(heuristic_map, cp_points, individual):
"""
Attempts to mutate the individual by replacing a random heuristic in the chromosome by a generated pattern.
:param individual: The individual to mutate.
:return: The mutated individual.
"""
pattern = list(individual)
mutation_point = random.randrange(len(pattern))
pattern[mutation_point] = generate_pattern(heuristic_map, cp_points)
return ''.join(pattern)
def population_initialization(population_size, heuristic_map, cp_points):
return [generate_pattern(heuristic_map, cp_points) for _ in range(population_size)]
def convert_assignment_result(heuristic_map, cp_points, cp_nozzle, component_list, individual, machine_number):
machine_cp_assign = [[] for _ in range(machine_number)]
for idx, cp_idx in enumerate(component_list):
h = individual[idx % len(individual)]
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, machine_cp_assign)
machine_cp_assign[machine_idx].append(cp_idx)
return machine_cp_assign
def cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height, component_list,
individual, machine_number, estimator):
machine_cp_assign = convert_assignment_result(heuristic_map, cp_points, cp_nozzle, component_list,
individual, machine_number)
objective_val = []
for machine_idx in range(machine_number):
machine_cp_points, machine_cp_nozzle = defaultdict(int), defaultdict(str)
for cp_idx in machine_cp_assign[machine_idx]:
machine_cp_points[cp_idx] = cp_points[cp_idx]
machine_cp_nozzle[cp_idx] = cp_nozzle[cp_idx]
objective_val.append(estimator.neural_network(machine_cp_points, machine_cp_nozzle, board_width, board_height))
# objective_val.append(estimator.heuristic_genetic(machine_cp_points, machine_cp_nozzle))
return objective_val
def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
heuristic_map = {
'p': LeastPoints,
'n': LeastNzChange,
'c': LeastCpTypes,
'r': LeastCpNzRatio,
'k': LeastCycle,
'g': LeastNzChange,
'u': LeastPickup,
}
# genetic-based hyper-heuristic
crossover_rate, mutation_rate = 0.8, 0.1
population_size, n_generations = 20, 100
n_iterations = 10
estimator = Estimator()
best_val, best_component_list = None, None
best_individual = None
division_component_data = pd.DataFrame(columns=component_data.columns)
for _, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
data['feeder-limit'], data['points'] = 1, int(data['points'] / data['feeder-limit'])
for _ in range(feeder_limit):
division_component_data = pd.concat([division_component_data, pd.DataFrame(data).T])
division_component_data = division_component_data.reset_index()
component_list = [idx for idx, data in division_component_data.iterrows() if data['points'] > 0]
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for idx, data in division_component_data.iterrows():
cp_points[idx], cp_nozzle[idx] = data['points'], data['nz']
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
with tqdm(total=n_generations * n_iterations) as pbar:
pbar.set_description('hyper-heuristic algorithm process for PCB assembly line balance')
for _ in range(n_iterations):
random.shuffle(component_list)
new_population = []
population = population_initialization(population_size, heuristic_map, cp_points)
# calculate fitness value
pop_val = []
for individual in population:
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
component_list, individual, machine_number, estimator)
pop_val.append(max(val))
for _ in range(n_generations):
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
population += new_population
for individual in new_population:
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
component_list, individual, machine_number, estimator)
pop_val.append(max(val))
# min-max convert
max_val = max(pop_val)
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(sel_pop_val)
while True:
index2 = roulette_wheel_selection(sel_pop_val)
if index1 != index2:
break
offspring1, offspring2 = crossover(population[index1], population[index2])
if np.random.random() < mutation_rate:
offspring1 = mutation(heuristic_map, cp_points, offspring1)
if np.random.random() < mutation_rate:
offspring2 = mutation(heuristic_map, cp_points, offspring2)
new_population.append(offspring1)
new_population.append(offspring2)
pbar.update(1)
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
component_list, population[0], machine_number, estimator)
val = max(val)
if best_val is None or val < best_val:
best_val = val
best_individual = population[0]
best_component_list = component_list.copy()
machine_cp_points = convert_assignment_result(heuristic_map, cp_points, cp_nozzle, best_component_list,
best_individual, machine_number)
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
best_component_list, best_individual, machine_number, estimator)
print(val)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for cp_idx in machine_cp_points[machine_idx]:
idx = division_component_data.iloc[cp_idx]['index']
assignment_result[machine_idx][idx] += cp_points[cp_idx]
print(assignment_result)
return assignment_result
if __name__ == '__main__':
warnings.simplefilter(action='ignore', category=FutureWarning)
parser = argparse.ArgumentParser(description='network training implementation')
parser.add_argument('--train', default=True, type=bool, help='determine whether training the network')
parser.add_argument('--save', default=True, type=bool,
help='determine whether saving the parameters of network, linear regression model, etc.')
parser.add_argument('--overwrite', default=False, type=bool,
help='determine whether overwriting the training and testing data')
parser.add_argument('--train_file', default='train_data.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=8000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=10000, type=int, help='size of training batch')
parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
params = parser.parse_args()
data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if params.overwrite:
file = {params.train_file: params.batch_size,
params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
for file_name, file_batch_size in file.items():
with open('opt/' + file_name, 'a') as f:
for _ in range(int(file_batch_size)):
mode = file_name.split('.')[0].split('_')[0]
pcb_data, component_data = data_mgr.generator(mode) # random generate a PCB data
# data_mgr.remover() # remove the last saved data
# data_mgr.saver('data/' + file_name, pcb_data) # save new data
info = base_optimizer(1, pcb_data, component_data,
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method='feeder-scan', hinter=True)
data_mgr.recorder(f, info, pcb_data, component_data)
f.close()
net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
data = data_mgr.loader('opt/' + params.train_file)
if params.train:
x_fit, y_fit = np.array(data[2:]).T, np.array([data[1]]).T
lr = LinearRegression()
lr.fit(x_fit, y_fit)
x_train = np.array(data[0][::data_mgr.get_update_round()])
# y_train = lr.predict(x_fit[::data_mgr.get_update_round()])
y_train = np.array(data[1][::data_mgr.get_update_round()])
x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=params.lr)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
loss_func = torch.nn.MSELoss()
for epoch in range(params.num_epochs):
pred = net(x_train)
loss = loss_func(pred, y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# scheduler.step()
if epoch % 100 == 0:
print('Epoch: ', epoch, ', Loss: ', loss.item())
if loss.item() < 1e-4:
break
net_predict = net(x_train).view(-1)
pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
pred_error = np.array([])
for t1, t2 in np.nditer([pred_time, real_time]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
mse = np.linalg.norm((net_predict - y_train.view(-1)).cpu().detach().numpy())
print(f'mean square error for training data result : {mse: 2f} ')
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
torch.save(net.state_dict(), 'model/net_model.pth')
with open('model/lr_model.pkl', 'wb') as f:
pickle.dump(lr, f)
torch.save(optimizer.state_dict(), 'model/optimizer_state.pth')
else:
net.load_state_dict(torch.load('model/net_model.pth'))
# optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
# optimizer.load_state_dict(torch.load('model/optimizer_state.pth'))
data = data_mgr.loader('opt/' + params.test_file)
x_test, y_test = np.array(data[0]), np.array(data[1])
# x_test, y_test = np.array(data[0]), lr.predict(np.array(data[2:]).T)
x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
net.eval()
with torch.no_grad():
pred_time = net(x_test).view(-1).cpu().detach().numpy()
x_test = x_test.cpu().detach().numpy()
over_set = []
pred_idx, pred_error = 0, np.array([])
for t1, t2 in np.nditer([pred_time, y_test.reshape(-1)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
if pred_error[-1] > 5:
over_set.append(pred_idx + 1)
print(f'\033[0;31;31midx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
f'gap: {pred_error[-1]: .3f}\033[0m')
else:
pass
# print(f'idx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, gap: {pred_error[-1]: .3f}')
pred_idx += 1
print('over:', over_set)
print('size:', len(over_set))
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .3f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .3f}% ')
mse = np.linalg.norm(pred_time - y_test.reshape(-1))
print(f'mean square error for test data result : {mse: 2f} ')