-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path4_mpi.cpp
More file actions
209 lines (171 loc) · 6.96 KB
/
4_mpi.cpp
File metadata and controls
209 lines (171 loc) · 6.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <vector>
#include <algorithm>
#include <chrono>
#include <limits>
#include <omp.h>
#include <mpi.h>
using namespace std;
using namespace std::chrono;
// Build every candidate tour over `nodes`, each wrapped with the depot (node 0)
// at both ends, and append them to `permutations`.
// Note: `nodes` is sorted in place first so the do/while below enumerates all
// orderings exactly once; after the full cycle it is back in sorted order.
void generate_permutations(vector<int>& nodes, vector<vector<int>>& permutations) {
    sort(nodes.begin(), nodes.end()); // start from the lexicographically smallest ordering
    do {
        vector<int> route;
        route.reserve(nodes.size() + 2); // depot + customers + depot
        route.push_back(0);              // depart from the depot
        for (int node : nodes) {
            route.push_back(node);
        }
        route.push_back(0);              // return to the depot
        permutations.push_back(move(route));
    } while (next_permutation(nodes.begin(), nodes.end()));
}
// Repair each candidate route in place so it respects the problem constraints,
// inserting a depot visit (node 0) whenever:
//   - there is no direct edge between consecutive nodes (adj_matrix == -1), or
//   - the accumulated demand since the last depot visit would exceed
//     vehicle_capacity, or
//   - the number of customer stops since the last depot visit would exceed
//     num_stops.
// Routes are independent, so the outer loop is parallelized with OpenMP.
//
// Invariant relied on below: after `it = perm.insert(it, 0)`, `it` points at
// the freshly inserted depot, so the loop's ++it re-examines the displaced
// node with `from == 0`. Both reset branches therefore zero the counters and
// let that revisit account for the node's demand exactly once.
void adjust_permutations(vector<vector<int>>& permutations, const vector<vector<int>>& adj_matrix, const unordered_map<int, int>& demands, int vehicle_capacity, int num_stops) {
    #pragma omp parallel for
    for (size_t perm_index = 0; perm_index < permutations.size(); ++perm_index) {
        auto& perm = permutations[perm_index];
        int current_capacity = 0; // demand accumulated since last depot visit
        int stops = 0;            // customer stops since last depot visit
        for (auto it = perm.begin() + 1; it != perm.end() - 1; ++it) {
            int from = *(it - 1);
            int to = *it;
            // No direct edge between consecutive nodes: detour via the depot.
            if (adj_matrix[from][to] == -1) {
                it = perm.insert(it, 0);
                current_capacity = 0; // reset after returning to depot
                stops = 0;
                continue; // ++it revisits `to` with from == 0
            }
            // Accumulate the demand of customer nodes (the depot has none).
            if (demands.find(to) != demands.end()) {
                current_capacity += demands.at(to);
                stops++;
            }
            if (current_capacity > vehicle_capacity || stops > num_stops) {
                if (*(it - 1) != 0) { // avoid back-to-back depot visits
                    it = perm.insert(it, 0);
                    // BUG FIX: the original seeded current_capacity with
                    // demands.at(to) here, but the loop's ++it revisits `to`
                    // and adds its demand again, double-counting demand and
                    // stops (forcing spurious depot returns later). It could
                    // also throw std::out_of_range if `to` had no demand
                    // entry. Zeroing both counters — like the no-path branch
                    // above — lets the revisit count `to` exactly once.
                    current_capacity = 0;
                    stops = 0;
                }
            }
        }
    }
}
// Scan every candidate route and return the cheapest feasible one.
// A route is infeasible if any consecutive pair of nodes has no edge (-1 in
// adj_matrix). local_min_cost receives the best cost found, or INT_MAX when
// no route is feasible (in which case the returned route is empty).
// Each OpenMP thread keeps a private best and merges it in a critical section.
vector<int> find_min_cost_route(const vector<vector<int>>& permutations, const vector<vector<int>>& adj_matrix, int& local_min_cost) {
    vector<int> best_route;
    local_min_cost = numeric_limits<int>::max();
    #pragma omp parallel
    {
        int best_cost_here = numeric_limits<int>::max();
        vector<int> best_route_here;
        #pragma omp for nowait
        for (size_t idx = 0; idx < permutations.size(); ++idx) {
            const vector<int>& route = permutations[idx];
            int total = 0;
            bool feasible = true;
            for (size_t leg = 0; feasible && leg + 1 < route.size(); ++leg) {
                const int weight = adj_matrix[route[leg]][route[leg + 1]];
                if (weight == -1) {
                    feasible = false; // missing edge: abandon this route
                } else {
                    total += weight;
                }
            }
            if (feasible && total < best_cost_here) {
                best_cost_here = total;
                best_route_here = route;
            }
        }
        // Merge this thread's best into the shared result.
        #pragma omp critical
        {
            if (best_cost_here < local_min_cost) {
                local_min_cost = best_cost_here;
                best_route = best_route_here;
            }
        }
    }
    return best_route;
}
// Entry point: MPI driver for a brute-force capacitated-vehicle-routing search.
// Every rank reads the full graph from "grafo.txt" and enumerates ALL
// permutations, then processes only its contiguous slice; the best cost is
// combined with MPI_Allreduce and the winning route is sent to rank 0, which
// prints the result (in Portuguese) and the elapsed wall-clock time.
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
// Timer starts before file I/O, so the reported time includes input parsing.
auto start = high_resolution_clock::now();
// Expected file layout (inferred from the reads below):
//   num_nodes
//   (num_nodes - 1) lines of: node demand      -- node 0 (depot) has no demand
//   num_edges
//   num_edges lines of: from to weight         -- directed: only adj[from][to] is set
ifstream infile("grafo.txt");
if (!infile) {
cerr << "Cannot open grafo.txt" << endl;
MPI_Finalize();
return 1;
}
int num_nodes;
infile >> num_nodes;
unordered_map<int, int> demands;
for (int i = 1; i < num_nodes; ++i) {
int node, demand;
infile >> node >> demand;
demands[node] = demand;
}
// -1 marks "no edge" throughout the program.
vector<vector<int>> adj_matrix(num_nodes, vector<int>(num_nodes, -1));
int num_edges;
infile >> num_edges;
for (int i = 0; i < num_edges; ++i) {
int from, to, weight;
infile >> from >> to >> weight;
adj_matrix[from][to] = weight;
}
infile.close();
// Customer nodes are 1..num_nodes-1; the depot (0) is added by
// generate_permutations at both ends of each route.
vector<int> nodes;
for (int i = 1; i < num_nodes; ++i) {
nodes.push_back(i);
}
vector<vector<int>> all_permutations;
// Generate permutations (every rank builds the full (num_nodes-1)! set;
// only the slice below is actually processed).
generate_permutations(nodes, all_permutations);
// Split permutations among MPI processes: equal chunks, with the last rank
// absorbing the remainder.
int chunk_size = all_permutations.size() / world_size;
int start_index = world_rank * chunk_size;
int end_index = (world_rank == world_size - 1) ? all_permutations.size() : start_index + chunk_size;
vector<vector<int>> local_permutations(all_permutations.begin() + start_index, all_permutations.begin() + end_index);
// Problem constraints (hard-coded): max load per trip and max stops per trip.
int vehicle_capacity = 15;
int num_stops = 5;
// Adjust permutations: insert depot returns where capacity/stops/connectivity require.
adjust_permutations(local_permutations, adj_matrix, demands, vehicle_capacity, num_stops);
// Find minimum cost route within this rank's slice (INT_MAX if none feasible).
int local_min_cost;
vector<int> local_min_cost_route = find_min_cost_route(local_permutations, adj_matrix, local_min_cost);
// Gather all results: global minimum cost across ranks.
int global_min_cost;
MPI_Allreduce(&local_min_cost, &global_min_cost, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
// Determine which process found the global minimum. On ties, the MAX
// reduction picks the highest matching rank — all ranks agree on one winner.
int min_cost_rank = (local_min_cost == global_min_cost) ? world_rank : -1;
int global_min_cost_rank;
MPI_Allreduce(&min_cost_rank, &global_min_cost_rank, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
// Communicate the size of the route before sending the actual route.
// After the Bcast every rank holds the WINNER's route size (the local value
// on non-winning ranks is overwritten).
int local_min_cost_route_size = local_min_cost_route.size();
MPI_Bcast(&local_min_cost_route_size, 1, MPI_INT, global_min_cost_rank, MPI_COMM_WORLD);
// Resize global_min_cost_route to the correct size
vector<int> global_min_cost_route(local_min_cost_route_size, 0);
// Send the local minimum cost route from the process that found it.
// NOTE(review): when the winner IS rank 0, this is a standard-mode send to
// self before the matching Recv below — it relies on MPI buffering the
// message and can deadlock for large routes; MPI_Sendrecv (or skipping the
// transfer when winner == 0) would be safe. Confirm against the MPI standard.
if (world_rank == global_min_cost_rank) {
MPI_Send(local_min_cost_route.data(), local_min_cost_route_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
// Root process receives the global minimum cost route and reports results.
if (world_rank == 0) {
MPI_Recv(global_min_cost_route.data(), local_min_cost_route_size, MPI_INT, global_min_cost_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
cout << "Menor custo encontrado: " << global_min_cost << endl;
cout << "Rota de menor custo: ";
for (int node : global_min_cost_route) {
cout << node << " ";
}
cout << endl;
auto end = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(end - start).count();
cout << "Tempo total de execução: " << duration << " ms." << endl;
}
MPI_Finalize();
return 0;
}