Add a hyper-heuristic assembly-line optimization algorithm

This commit is contained in:
2024-05-17 22:52:49 +08:00
parent 6fa1f53f69
commit 7c9a900b95
13 changed files with 1731 additions and 1109 deletions


@@ -1,160 +1,54 @@
import random
import numpy as np
from dataloader import *
from optimizer_genetic import *
from optimizer_heuristic import *
from optimizer_reconfiguration import *
from optimizer_genetic import line_optimizer_genetic
from optimizer_heuristic import line_optimizer_heuristic
from optimizer_reconfiguration import line_optimizer_reconfiguration
from optimizer_hyperheuristic import line_optimizer_hyperheuristic
from base_optimizer.optimizer_interface import *
def deviation(data):
    # despite the name, this returns the population variance of data, not the standard deviation
    assert len(data) > 0
    average, variance = sum(data) / len(data), 0
    for v in data:
        variance += (v - average) ** 2
    return variance / len(data)
def optimizer(pcb_data, component_data, line_optimizer, machine_optimizer, machine_number):
    if line_optimizer == "heuristic":
        assignment_result = assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number)
    elif line_optimizer == "genetic":
        assignment_result = assemblyline_optimizer_genetic(pcb_data, component_data, machine_number)
    elif line_optimizer == "reconfiguration":
        assignment_result = reconfiguration_optimizer(pcb_data, component_data, machine_number)
    if machine_number > 1:
        if line_optimizer == 'hyper-heuristic':
            assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, machine_number)
        elif line_optimizer == "heuristic":
            assignment_result = line_optimizer_heuristic(component_data, machine_number)
        elif line_optimizer == "genetic":
            assignment_result = line_optimizer_genetic(component_data, machine_number)
        elif line_optimizer == "reconfiguration":
            assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, machine_number)
        else:
            raise ValueError('line optimizer method does not exist')  # a bare string cannot be raised in Python 3
    else:
        return
        assignment_result = [[]]
        for _, data in component_data.iterrows():
            assignment_result[-1].append(data.points)
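        # single-machine case: the full point count of every component type stays on the one machine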
    assignment_result_cpy = copy.deepcopy(assignment_result)
    placement_points, assembly_info = [], []
    partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
    partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
    assembly_info = []
    for machine_index in range(machine_number):
        partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
        partial_component_data[machine_index] = component_data.copy(deep=True)
        placement_points.append(sum(assignment_result[machine_index]))
        assembly_info.append(
            base_optimizer(machine_index + 1, partial_pcb_data[machine_index], partial_component_data[machine_index],
                           feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method=machine_optimizer,
                           hinter=True))
    assert sum(placement_points) == len(pcb_data)
    # === evenly assign the available feeders ===
    for part_index, data in component_data.iterrows():
        feeder_limit = data['feeder-limit']
        feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
        for machine_index in range(machine_number):
            if feeder_points[machine_index] == 0:
                continue
            arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
            partial_component_data[machine_index].loc[part_index, 'feeder-limit'] = arg_feeder
            feeder_limit -= arg_feeder
        for machine_index in range(machine_number):
            if feeder_limit <= 0:
                break
            if feeder_points[machine_index] == 0:
                continue
            partial_component_data[machine_index].loc[part_index, 'feeder-limit'] += 1
            feeder_limit -= 1
        for machine_index in range(machine_number):
            if feeder_points[machine_index] > 0:
                assert partial_component_data[machine_index].loc[part_index, 'feeder-limit'] > 0
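    # e.g. with feeder-limit = 3 and per-machine points [60, 40]: the floor pass assigns
    # [1, 1] feeders, then the remainder pass tops up the first machine, giving [2, 1]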
    # === assign placements ===
    component_machine_index = [0 for _ in range(len(component_data))]
    for _, data in pcb_data.iterrows():
        part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
        while True:
            machine_index = component_machine_index[part_index]
            if assignment_result[machine_index][part_index] == 0:
                component_machine_index[part_index] += 1
                machine_index += 1
            else:
                break
        assignment_result[machine_index][part_index] -= 1
        partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
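    # placement rows are consumed greedily: each PCB row goes to the lowest-indexed machine
    # that still has unassigned points for its part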
    # === adjust the number of available feeders for single optimization separately ===
    for machine_index, data in partial_pcb_data.items():
        data = data.reset_index(drop=True)
        if len(data) == 0:
            continue

        part_info = []  # part info list (part index, part points, available feeder-num, upper feeder-num)
        for part_index, cp_data in partial_component_data[machine_index].iterrows():
            if assignment_result_cpy[machine_index][part_index]:
                part_info.append(
                    [part_index, assignment_result_cpy[machine_index][part_index], 1, cp_data['feeder-limit']])

        part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
        start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
        while start_index < len(part_info):
            assign_part_point, assign_part_index = [], []
            for idx_ in range(start_index, end_index + 1):
                for _ in range(part_info[idx_][2]):
                    assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
                    assign_part_index.append(idx_)

            variance = deviation(assign_part_point)
            while start_index <= end_index:
                part_info_index = assign_part_index[np.argmax(assign_part_point)]
                if part_info[part_info_index][2] < part_info[part_info_index][3]:  # limited by the upper bound on the feeder count
                    part_info[part_info_index][2] += 1
                    end_index -= 1

                    new_assign_part_point, new_assign_part_index = [], []
                    for idx_ in range(start_index, end_index + 1):
                        for _ in range(part_info[idx_][2]):
                            new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
                            new_assign_part_index.append(idx_)

                    new_variance = deviation(new_assign_part_point)
                    if variance < new_variance:
                        part_info[part_info_index][2] -= 1
                        end_index += 1
                        break

                    variance = new_variance
                    assign_part_index, assign_part_point = new_assign_part_index.copy(), new_assign_part_point.copy()
                else:
                    break

            start_index = end_index + 1
            end_index = min(start_index + max_head_index - 1, len(part_info) - 1)

        # update available feeder number
        max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
        for info in part_info:
            partial_component_data[machine_index].loc[info[0], 'feeder-limit'] = math.ceil(info[2] / max_avl_feeder)

        assembly_info.append(base_optimizer(machine_index + 1, data, partial_component_data[machine_index],
                                            feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
                                            method=machine_optimizer, hinter=True))
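    # the adjustment above balances points-per-feeder inside windows of at most max_head_index
    # parts: extra feeders go to the currently most loaded part for as long as the variance of
    # the points-per-feeder distribution keeps decreasing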
    with open('model/lr_model.pkl', 'rb') as f:
        lr = pickle.load(f)

    average_time, standard_deviation_time = sum(
        [assembly_info[m].placement_time for m in range(machine_number)]) / machine_number, 0
    for machine_index in range(machine_number):
        total_component_types = sum(1 if pt else 0 for pt in assignment_result_cpy[machine_index])
        placement_time = assembly_info[machine_index].placement_time
        total_component_types = sum(1 if pt else 0 for pt in assignment_result[machine_index])
        total_placement_points = sum(assignment_result[machine_index])
        total_time = assembly_info[machine_index].total_time
        print(f'assembly time for machine {machine_index + 1: d}: {total_time: .3f} s, total placement: '
              f'{total_placement_points}, total component types {total_component_types: d}', end='')
        for part_index in range(len(assignment_result[machine_index])):
            if assignment_result[machine_index][part_index]:
                print(', ', part_index, end='')
        print('')

        regression_time = lr.coef_[0][0] * assembly_info[machine_index].cycle_counter + lr.coef_[0][1] * assembly_info[
            machine_index].nozzle_change_counter + lr.coef_[0][2] * assembly_info[machine_index].pickup_counter + \
                          lr.coef_[0][3] * assembly_info[machine_index].pickup_movement + lr.coef_[0][4] * \
                          placement_points[machine_index] + lr.intercept_[0]
        print(f'assembly time for machine {machine_index + 1: d}: {placement_time: .3f} s, total placement: '
              f'{placement_points[machine_index]}, total component types {total_component_types: d}', end=', ')
        print(f'regression time: {regression_time: .3f} s')

        standard_deviation_time += pow(placement_time - average_time, 2)
    standard_deviation_time /= machine_number
    standard_deviation_time = math.sqrt(standard_deviation_time)
    print(f'final assembly time: {max(info.placement_time for info in assembly_info): .3f} s, '
          f'standard deviation: {standard_deviation_time: .3f}')
    print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
          f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
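    # the regression estimate printed above amounts to
    #   T ~= w0*cycles + w1*nozzle_changes + w2*pickups + w3*pickup_movement + w4*points + b,
    # with the weights w taken from lr.coef_[0] and the intercept b from lr.intercept_[0]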
@timer_wrapper
@@ -165,10 +59,10 @@ def main():
    parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
    parser.add_argument('--auto_register', default=1, type=int, help='register the component according to the pcb data')
    parser.add_argument('--machine_number', default=3, type=int, help='the number of machines in the assembly line')
    parser.add_argument('--machine_optimizer', default='feeder_scan', type=str, help='optimizer for single machine')
    parser.add_argument('--line_optimizer', default='genetic', type=str, help='optimizer for PCB Assembly Line')
    parser.add_argument('--feeder_limit', default=1, type=int,
                        help='the upper feeder limit for each type of component')
    parser.add_argument('--machine_optimizer', default='feeder-scan', type=str, help='optimizer for single machine')
    parser.add_argument('--line_optimizer', default='hyper-heuristic', type=str, help='optimizer for PCB assembly line')
    # parser.add_argument('--line_optimizer', default='genetic', type=str, help='optimizer for PCB assembly line')
    parser.add_argument('--feeder_limit', default=1, type=int, help='the upper feeder limit for each type of component')

    params = parser.parse_args()
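    # example invocation (the entry-point file name is assumed):
    #   python optimizer.py --filename PCB.txt --machine_number 3 \
    #       --machine_optimizer feeder-scan --line_optimizer hyper-heuristic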
    # display all rows and columns in the output
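    # (the elided context presumably calls pd.set_option('display.max_columns', None)
    #  and pd.set_option('display.max_rows', None))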
@@ -181,6 +75,40 @@ def main():
    optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, params.machine_number)

    # index_list, part_list = [1, 4, 8, 9, 12, 13, 14, 18, 20, 22, 23, 25, 33, 35, 38, 39, 40], []
    # for idx in index_list:
    #     part_list.append(component_data.iloc[idx].part)
    # pcb_data = pcb_data[pcb_data['part'].isin(part_list)].reset_index(drop=True)
    # component_data = component_data.iloc[index_list].reset_index(drop=True)
    # optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, 1)
    #
    # from optimizer_hyperheuristic import DataMgr, Net
    # data_mgr = DataMgr()
    # cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
    # for _, data in component_data.iterrows():
    #     cp_points[data.part], cp_nozzle[data.part] = data.points, data.nz
    # idx = 1832
    # data = data_mgr.loader(file_name)
    # encoding = np.array(data[0][idx])
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
    #
    # net.load_state_dict(torch.load('model/net_model.pth'))
    # board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
    # encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
    # encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
    # print(f'net pred time: {net(encoding)[0, 0].item():.3f}')
    # with open('model/lr_model.pkl', 'rb') as f:
    #     lr = pickle.load(f)
    #
    # print('lr model train data: ', np.array(data[2:]).T[idx].reshape(1, -1))
    # print('lr model pred time: ', lr.predict(np.array(data[2:]).T[idx].reshape(1, -1)))
    # print('real time: ', data[-1][idx] * 3600 / data[1][idx])
if __name__ == '__main__':
    main()