Add hyper-heuristic assembly line optimization algorithm
@@ -49,7 +49,7 @@ def selective_crossover(component_points, component_feeders, mother, father, mac
one_counter, feasible_cut_line = 0, []

idx = 0
for part_index, points in component_points:
for part_index, points in component_points.items():
one_counter = 0

idx_, mother_cut_line, father_cut_line = 0, [-1], [-1]
@@ -131,13 +131,12 @@ def selective_crossover(component_points, component_feeders, mother, father, mac
return offspring1, offspring2


def cal_individual_val(component_points, component_feeders, component_nozzle, machine_number, individual, data_mgr, net):
def cal_individual_val(component_points, component_nozzle, machine_number, individual, estimator):
idx, objective_val = 0, []
machine_component_points = [[] for _ in range(machine_number)]
nozzle_component_points = defaultdict(list)

# decode the component allocation
for comp_idx, points in component_points:
for part_index, points in component_points.items():
component_gene = individual[idx: idx + points + machine_number - 1]
machine_idx, component_counter = 0, 0
for gene in component_gene:
@@ -150,108 +149,19 @@ def cal_individual_val(component_points, component_feeders, component_nozzle, ma
machine_component_points[-1].append(component_counter)
idx += (points + machine_number - 1)
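# each component occupies points + machine_number - 1 genes; presumably the machine_number - 1
# extra genes act as separators that split the component's placement points among the machines
# (a stars-and-bars style encoding), decoded here into per-machine point counts (machine_component_points)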

nozzle_component_points[component_nozzle[comp_idx]] = [0] * len(component_points) # initialize the component-to-nozzle point count list

# ======== newly added: begin ========
objective_val = 0
for machine_idx in range(machine_number):
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for comp_idx, _ in component_points:
if machine_component_points[machine_idx][comp_idx] == 0:
continue
cp_points['C' + str(comp_idx)] = machine_component_points[machine_idx][comp_idx]
cp_nozzle['C' + str(comp_idx)] = component_nozzle[comp_idx]

encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, 45, 150))
encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
# pred_time = net(encoding)[0, 0].item()
# objective_val.append(pred_time * sum(points for points in cp_points.values()))
objective_val.append(net(encoding)[0, 0].item())
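# the network's scalar output is used directly as this machine's predicted assembly time;
# the commented lines above would instead scale a per-point prediction by the machine's point total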

return objective_val, machine_component_points
# ======== newly added: end (the code below is deprecated) =====
for comp_idx, points in component_points:
nozzle_component_points[component_nozzle[comp_idx]][comp_idx] = points

for machine_idx in range(machine_number):
nozzle_points = defaultdict(int)
for idx, nozzle in component_nozzle.items():
if component_points[idx] == 0:
continue
nozzle_points[nozzle] += machine_component_points[machine_idx][idx]

machine_points = sum(machine_component_points[machine_idx]) # num of placement points
if machine_points == 0:
continue
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
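# ul counts the extra nozzle sets needed when the distinct nozzle types exceed the head count,
# i.e. one additional nozzle-change cycle per extra group of max_head_index nozzle types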

# assignments of nozzles to heads
wl = 0 # num of workload
total_heads = (1 + ul) * max_head_index - len(nozzle_points)
nozzle_heads = defaultdict(int)
for nozzle in nozzle_points.keys():
if nozzle_points[nozzle] == 0:
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for part_index, points in enumerate(machine_component_points[machine_idx]):
if points == 0:
continue
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / machine_points * total_heads)
nozzle_heads[nozzle] += 1

total_heads = (1 + ul) * max_head_index
for heads in nozzle_heads.values():
total_heads -= heads

while True:
nozzle = max(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x])
if total_heads == 0:
break
nozzle_heads[nozzle] += 1
total_heads -= 1
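# each nozzle type gets one head up front, the surplus heads are split in proportion to each nozzle's
# share of placement points, and any heads still left go greedily to the nozzle with the highest
# points-per-head ratio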

# averagely assign placements to heads
heads_placement = []
for nozzle in nozzle_heads.keys():
points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])

heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
for idx in range(len(heads_placement) - 1, -1, -1):
if nozzle_points[nozzle] <= 0:
break
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)

# the number of pick-up operations
# (under the assumption that the number of feeders available for each comp. type equals 1)
pl = 0
heads_placement_points = [0 for _ in range(max_head_index)]
while True:
head_assign_point = []
for head in range(max_head_index):
if heads_placement_points[head] != 0 or heads_placement[head] == 0:
continue

nozzle, points = heads_placement[head]
max_comp_index = np.argmax(nozzle_component_points[nozzle])

heads_placement_points[head] = min(points, nozzle_component_points[nozzle][max_comp_index])
nozzle_component_points[nozzle][max_comp_index] -= heads_placement_points[head]

head_assign_point.append(heads_placement_points[head])

min_points_list = list(filter(lambda x: x > 0, heads_placement_points))
if len(min_points_list) == 0 or len(head_assign_point) == 0:
break

pl += max(head_assign_point)

for head in range(max_head_index):
heads_placement[head][1] -= min(min_points_list)
heads_placement_points[head] -= min(min_points_list)

# every max_head_index consecutive heads in the sorted order are grouped together as a nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
objective_val.append(T_pp * machine_points + T_tr * wl + T_nc * ul + T_pl * pl)
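# per-machine time estimate: a weighted sum of placement points (T_pp), head workload wl (T_tr),
# nozzle sets ul (T_nc) and pick-up operations pl (T_pl)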

cp_points[part_index], cp_nozzle[part_index] = points, component_nozzle[part_index]
# objective_val = max(objective_val, estimator.neural_network(cp_points, cp_nozzle, 237.542, 223.088))
objective_val = max(objective_val, estimator.heuristic_genetic(cp_points, cp_nozzle))
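# the line-level objective becomes the bottleneck machine's estimated time (the maximum over machines);
# the commented line would use the estimator's neural_network method instead of the heuristic estimate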
return objective_val, machine_component_points


@@ -276,35 +186,25 @@ def individual_convert(component_points, individual):
return machine_component_points


def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
def line_optimizer_genetic(component_data, machine_number):
# basic parameter
# crossover rate & mutation rate: 80% & 10%
# population size: 200
# the number of generation: 500
crossover_rate, mutation_rate = 0.8, 0.1
population_size, n_generations = 200, 500

estimator = Estimator()
# the number of placement points, the number of available feeders, and nozzle type of component respectively
component_points, component_feeders, component_nozzle = defaultdict(int), defaultdict(int), defaultdict(str)
for data in pcb_data.iterrows():
part_index = component_data[component_data['part'] == data[1]['part']].index.tolist()[0]
nozzle = component_data.loc[part_index]['nz']

component_points[part_index] += 1
component_feeders[part_index] = component_data.loc[part_index]['feeder-limit']
component_nozzle[part_index] = nozzle

component_points = sorted(component_points.items(), key=lambda x: x[0]) # determines the chromosome gene ordering
data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)

net.load_state_dict(torch.load('model/net_model.pth'))
# optimizer = torch.optim.Adam(net.parameters(), lr=0.1)
# optimizer.load_state_dict(torch.load('optimizer_state.pth'))
cp_points, cp_feeders, cp_nozzle = defaultdict(int), defaultdict(int), defaultdict(int)
for part_index, data in component_data.iterrows():
cp_points[part_index] += data['points']
cp_feeders[part_index] = data['feeder-limit']
cp_nozzle[part_index] = data['nz']

# population initialization
population = selective_initialization(component_points, component_feeders, population_size, machine_number)
population = selective_initialization(sorted(cp_points.items(), key=lambda x: x[0]), cp_feeders, population_size,
machine_number)
with tqdm(total=n_generations) as pbar:
pbar.set_description('genetic algorithm process for PCB assembly line balance')

@@ -313,9 +213,8 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
# calculate fitness value
pop_val = []
for individual in population:
val, assigned_points = cal_individual_val(component_points, component_feeders, component_nozzle,
machine_number, individual, data_mgr, net)
pop_val.append(max(val))
val, assigned_points = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
pop_val.append(val)

select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index]
@@ -323,9 +222,8 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):

population += new_population
for individual in new_population:
val, _ = cal_individual_val(component_points, component_feeders, component_nozzle, machine_number,
individual, data_mgr, net)
pop_val.append(max(val))
val, _ = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
pop_val.append(val)

# min-max convert
max_val = max(pop_val)
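# presumably the minimization objective (estimated line time) is converted here into a maximization
# fitness, e.g. max_val - val, so that faster individuals receive a larger share in the selection step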
@@ -343,14 +241,14 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
if index1 != index2:
break

offspring1, offspring2 = selective_crossover(component_points, component_feeders,
offspring1, offspring2 = selective_crossover(cp_points, cp_feeders,
population[index1], population[index2], machine_number)

if np.random.random() < mutation_rate:
offspring1 = constraint_swap_mutation(component_points, offspring1, machine_number)
offspring1 = constraint_swap_mutation(cp_points, offspring1, machine_number)

if np.random.random() < mutation_rate:
offspring2 = constraint_swap_mutation(component_points, offspring2, machine_number)
offspring2 = constraint_swap_mutation(cp_points, offspring2, machine_number)

new_population.append(offspring1)
new_population.append(offspring2)
@@ -358,8 +256,7 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
pbar.update(1)

best_individual = population[np.argmax(pop_val)]
val, assignment_result = cal_individual_val(component_points, component_feeders, component_nozzle, machine_number,
best_individual, data_mgr, net)
val, assignment_result = cal_individual_val(cp_points, cp_nozzle, machine_number, best_individual, estimator)

print('final value: ', val)
# available feeder check
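
A minimal usage sketch of the refactored entry point, assuming component_data is a pandas DataFrame with the 'points', 'feeder-limit' and 'nz' columns read above; the part names, nozzle types and machine count are hypothetical, and the return value is only assumed to correspond to the assignment_result computed inside the function:

import pandas as pd

# hypothetical component table; column names follow the component_data.iterrows() loop above
component_data = pd.DataFrame({
    'part':         ['R0402_10K', 'C0603_100N', 'SOT23_NPN'],
    'points':       [120, 80, 24],                 # placement points per component
    'feeder-limit': [2, 1, 1],                     # feeders available per component
    'nz':           ['CN065', 'CN065', 'CN140'],   # nozzle type per component
})

# balance the board across a hypothetical three-machine line;
# assumed to return the per-machine placement allocation (assignment_result)
assignment_result = line_optimizer_genetic(component_data, machine_number=3)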