hybrid.cpp
#include <fstream>
#include <unordered_map>
#include <string>
#include <cctype>
#include <iostream>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <queue>
#include <cstring>
#include <mpi.h>
#define REQ_TAG 1
#define ASSIGN_TAG 2
using namespace std;
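// Shared state for the per-rank OpenMP pipeline:
//   readers_q      - word queue filled by reader tasks and drained by mapper tasks
//   send_buffers   - per-destination-rank (word, count) pairs produced by the mappers
//   reducer_queues - per-reducer (word, count) queues held by this rank after the MPI exchange
//   global_counts  - this rank's merged counts (rank 0 holds the final result after gathering)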
vector<string> readers_q;
vector<vector<pair<string, int>>> send_buffers;
vector<vector<pair<string, int>>> reducer_queues;
unordered_map<string, int> global_counts;
size_t total_words;
size_t files_remain;
int num_reducers;
int num_readers;
int readers_avail;
int total_ranks;
int num_mappers;
omp_lock_t readers_lock;
vector<omp_lock_t> mappers_locks;
vector<omp_lock_t> reducer_locks;
omp_lock_t global_counts_lock;
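// Normalize one token: strip leading/trailing punctuation and non-ASCII bytes, then lowercase it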
void process_word(string &w) {
// Remove punctuation and non-ascii chars at beginning
while (!w.empty()) {
signed char c = w.front();
if (c < 0 || ispunct(c)) {
w.erase(0, 1);
continue;
}
break;
}
// Remove punctuation and non-ascii chars at end
while (!w.empty()) {
signed char c = w.back();
if (c < 0 || ispunct(c)) {
w.pop_back();
continue;
}
break;
}
// Convert all letters to lowercase
for (char &ch : w) {
unsigned char c = static_cast<unsigned char>(ch);
if (isupper(c)) {
ch = tolower(c);
}
}
}
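// Reader task: tokenize one input file, normalize each word, and append the non-empty words
// to the shared readers_q in a single locked batch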
void read_file(const char* fname) {
#pragma omp atomic
readers_avail--;
size_t wc = 0;
ifstream fin(fname);
if (!fin) {
fprintf(stderr, "error: unable to open input file: %s\n", fname);
exit(1);
}
// Process words in chunks to reduce locking
const int chunk_size = 1024; // select the best chunk size
vector<string> words;
words.reserve(chunk_size);
string word;
while (fin >> word) {
process_word(word);
if (!word.empty()) { // avoid pushing empty strings
wc++;
words.push_back(word);
}
}
omp_set_lock(&readers_lock);
readers_q.insert(readers_q.end(), make_move_iterator(words.begin()), make_move_iterator(words.end()));
omp_unset_lock(&readers_lock);
#pragma omp atomic
total_words += wc;
#pragma omp atomic
files_remain--;
#pragma omp atomic
readers_avail++;
}
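// Simple additive hash: sum of byte values modulo R, used to pick a destination rank
// (R = total_ranks) or a reducer queue (R = num_reducers) for a word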
int hash_str(const string &s, int R) {
int sum = 0;
for (unsigned char c : s) {
sum += c;
}
return sum % R;
}
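// Mapper task: drain readers_q in chunks, accumulate local word counts,
// then scatter the counts into per-destination-rank send buffers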
void mapping_step() {
unordered_map<string, int> buckets;
// Grab elements from the work queue in chunks
const size_t chunk_size = 1024; // find which chunk size works the best
vector<string> working_batch;
working_batch.reserve(chunk_size);
while (true) {
working_batch.clear();
// Lock and grab new chunk of elements if queue is not empty
omp_set_lock(&readers_lock);
for (size_t i = 0; i < chunk_size && !readers_q.empty(); ++i) {
working_batch.push_back(readers_q.back());
readers_q.pop_back();
}
omp_unset_lock(&readers_lock);
if (!working_batch.empty()) {
// Queue not empty -- process new elements
for (size_t i = 0; i < working_batch.size(); ++i) {
buckets[working_batch[i]]++;
}
}
else {
int remaining;
// Shared global variable -- must be read atomically
#pragma omp atomic read
remaining = files_remain;
if (remaining == 0) {
// Queue empty and all files are processed
break;
}
// Mappers are ahead of readers
#pragma omp taskyield
}
}
// Push thread's results into the reducer queues
for (const auto &el : buckets) {
int dst_rank = hash_str(el.first, total_ranks);
omp_set_lock(&mappers_locks[dst_rank]);
send_buffers[dst_rank].push_back(el);
omp_unset_lock(&mappers_locks[dst_rank]);
}
}
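// All-to-all exchange: for every other rank, swap pair counts, word lengths, packed characters,
// and per-word counts via MPI_Sendrecv; locally owned and received pairs are routed to reducer_queues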
void exchange_data(int my_rank) {
for (int i = 0; i < total_ranks; ++i) {
// Skip sending to yourself, send to reducer queues
if (i == my_rank) {
for (auto &el : send_buffers[i]) {
int ind = hash_str(el.first, num_reducers);
reducer_queues[ind].push_back(el);
}
continue;
}
//send_N, recv_N - Number of words being exchanged between my_rank and rank i
int send_N = send_buffers[i].size();
int recv_N = 0;
MPI_Sendrecv(&send_N, 1, MPI_INT, i, 0,
&recv_N, 1, MPI_INT, i, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//*_lengths = lengths of words being exchanged
//*_counts = word count for each word exchanged between ranks
vector<int> send_lengths(send_N);
vector<int> send_counts(send_N);
int total_send_chars = 0;
for(int j = 0; j < send_N; j++){
string w = send_buffers[i][j].first;
int len = w.size();
send_lengths[j] = len;
send_counts[j] = send_buffers[i][j].second;
total_send_chars += len;
}
//*_chars = buffers of all characters exchanged
vector<char> send_chars(total_send_chars);
int offset = 0;
for(int j = 0; j < send_N; j++){
string w = send_buffers[i][j].first;
int len = w.size();
if(len > 0) memcpy(&send_chars[offset], w.data(), len);
offset += len;
}
vector<int> recv_lengths(recv_N);
vector<int> recv_counts(recv_N);
//exchange word lengths
MPI_Sendrecv(send_lengths.data(), send_N, MPI_INT, i, 1,
recv_lengths.data(), recv_N, MPI_INT, i, 1,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
int total_recv_chars = 0;
for(int len : recv_lengths) total_recv_chars += len;
vector<char> recv_chars(total_recv_chars);
//exchange actual words
MPI_Sendrecv(send_chars.data(), total_send_chars, MPI_CHAR, i, 2,
recv_chars.data(), total_recv_chars, MPI_CHAR, i, 2,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//exchange counts for unpacking
MPI_Sendrecv(send_counts.data(), send_N, MPI_INT, i, 3,
recv_counts.data(), recv_N, MPI_INT, i, 3,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//unpack words received and distribute to reducer queues
offset = 0;
for(int j = 0; j < recv_N; j++){
int len = recv_lengths[j];
if(len > 0) {
string word(&recv_chars[offset], len);
int count = recv_counts[j];
int idx = hash_str(word, num_reducers);
reducer_queues[idx].push_back({word, count});
}
// Advance the offset even for zero-length entries so later words stay aligned
offset += len;
}
}
}
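// Reducer task: fold reducer queue `id` into a local table, then merge it into global_counts under a lock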
void reduce_step(int id) {
// Use local hash table for partial results
unordered_map<string, int> local_result;
for (auto &cur_entry : reducer_queues[id]) {
local_result[cur_entry.first] += cur_entry.second;
}
// Merge partial results into global results
omp_set_lock(&global_counts_lock);
for (auto &el : local_result) {
global_counts[el.first] += el.second;
}
omp_unset_lock(&global_counts_lock);
}
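// Rank 0 receives each rank's entry count, word lengths, per-word counts, and packed characters,
// then merges them into its global_counts; every other rank sends its table to rank 0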
void gather_results(int my_rank) {
if (my_rank == 0) {
for (int i = 1; i < total_ranks; ++i) {
int N;
MPI_Recv(&N, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
if(N == 0) continue;
vector<int> lengths(N);
vector<int> counts(N);
MPI_Recv(lengths.data(), N, MPI_INT, i, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(counts.data(), N, MPI_INT, i, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
int total_chars = 0;
for(int len : lengths) {
total_chars += len;
}
vector<char> chars(total_chars);
MPI_Recv(chars.data(), total_chars, MPI_CHAR, i, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
int offset = 0;
// Rebuild each word from the packed character buffer and merge its count
for(int j = 0; j < N; j++){
string word(chars.data() + offset, lengths[j]);
global_counts[word] += counts[j];
offset += lengths[j];
}
}
}
else {
int N = global_counts.size();
MPI_Send(&N, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
if (N == 0) return;
vector<int> lengths;
vector<int> counts;
lengths.reserve(N);
counts.reserve(N);
int total_chars = 0;
for(auto &el : global_counts) {
string word = el.first;
int len = word.size();
lengths.push_back(len);
counts.push_back(el.second);
total_chars += len;
}
MPI_Send(lengths.data(), N, MPI_INT, 0, 1, MPI_COMM_WORLD);
MPI_Send(counts.data(), N, MPI_INT, 0, 2, MPI_COMM_WORLD);
vector<char> chars(total_chars);
int offset = 0;
for(auto &el : global_counts) {
string word = el.first;
int len = word.size();
memcpy(chars.data() + offset, word.data(), len);
offset += len;
}
MPI_Send(chars.data(), total_chars, MPI_CHAR, 0, 3, MPI_COMM_WORLD);
}
}
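// Per-rank pipeline: read input files -> map words to local counts -> exchange counts across ranks
// -> reduce per reducer queue -> gather on rank 0 -> sort and print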
int main(int argc, char* argv[]) {
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (provided < MPI_THREAD_FUNNELED) {
printf("Error: MPI_THREAD_FUNNELED is not supported.\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
if (argc < 2) {
fprintf(stderr, "usage: %s <input_files>\n", argv[0]);
MPI_Abort(MPI_COMM_WORLD, 1);
}
int n_threads = omp_get_max_threads();
num_reducers = n_threads; // Works best on my laptop -- test on ISAAC
files_remain = 0;
num_readers = n_threads;
num_mappers = n_threads;
readers_avail = num_readers;
total_ranks = size;
if (rank == 0) {
cerr << "Testing " << n_threads << " thread(s), " << size << " processes\n";
}
omp_init_lock(&readers_lock);
omp_init_lock(&global_counts_lock);
reducer_locks.resize(num_reducers);
for (int i = 0; i < num_reducers; ++i) {
omp_init_lock(&reducer_locks[i]);
}
mappers_locks.resize(total_ranks);
for (int i = 0; i < total_ranks; ++i) {
omp_init_lock(&mappers_locks[i]);
}
reducer_queues.resize(num_reducers);
send_buffers.resize(total_ranks);
double start, end, start_c, start_r, start_p;
start = MPI_Wtime();
#pragma omp parallel
{
#pragma omp master
{
// File reading step
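// Rank 0 serves file indices: it creates local reader tasks while its own reader threads
// are idle and answers remote requests until every file has been assigned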
if (rank == 0) {
int f_count = 1;
size_t active_ranks = size - 1;
bool done = false;
MPI_Status stat;
int tmp;
int flag;
int local_avail;
while (active_ranks > 0 || !done) {
// Check if any ranks sent a pending request
MPI_Iprobe(MPI_ANY_SOURCE, REQ_TAG, MPI_COMM_WORLD, &flag, &stat);
// If not, generate tasks for master rank threads
#pragma omp atomic read
local_avail = readers_avail;
if (!done && !flag && local_avail > 0) {
if (f_count < argc) {
#pragma omp atomic
files_remain++;
#pragma omp task
{
read_file(argv[f_count]);
}
f_count++;
}
else {
done = true;
}
}
// Block for a request only while some remote rank is still active, to avoid a deadlock
else if (size > 1 && active_ranks > 0) {
// Use tag = 1 for requests
MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, REQ_TAG, MPI_COMM_WORLD, &stat);
int requesting_rank = stat.MPI_SOURCE;
int send_buff = -1;
if (f_count < argc) {
send_buff = f_count;
f_count++;
}
else {
// This rank receives -1 for "work done"
active_ranks--;
}
// Use tag = 2 for responses
MPI_Send(&send_buff, 1, MPI_INT, requesting_rank, ASSIGN_TAG, MPI_COMM_WORLD);
}
}
}
else {
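// Other ranks ask rank 0 for a file index whenever a reader thread is free; -1 means no work is left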
int local_avail;
int rec_buff = 0;
while (true) {
#pragma omp atomic read
local_avail = readers_avail;
if (local_avail > 0) {
// Send request
MPI_Send(&rec_buff, 1, MPI_INT, 0, REQ_TAG, MPI_COMM_WORLD);
// Receive file number or -1 for "work done"
MPI_Recv(&rec_buff, 1, MPI_INT, 0, ASSIGN_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
if (rec_buff == -1) {
break;
}
#pragma omp atomic
files_remain++;
#pragma omp task
{
read_file(argv[rec_buff]);
}
}
else {
// All local readers are busy; yield so this thread can help run queued tasks
#pragma omp taskyield
}
}
}
// Mapping step
for (int i = 0; i < num_mappers; ++i) {
#pragma omp task
{
mapping_step();
}
}
}
}
start_c = MPI_Wtime();
exchange_data(rank);
start_r = MPI_Wtime();
// Reducing step
#pragma omp parallel for
for (int i = 0; i < num_reducers; ++i) {
reduce_step(i);
}
// Nothing to gather for single rank
double start_c2 = MPI_Wtime();
if (total_ranks > 1) {
gather_results(rank);
}
// Reduce through fixed-width locals so the buffer type matches MPI_UNSIGNED_LONG_LONG exactly
unsigned long long local_total = total_words;
unsigned long long global_total_words = 0;
MPI_Reduce(&local_total, &global_total_words, 1,
MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
total_words = global_total_words;
}
start_p = MPI_Wtime();
vector<pair<string, int>> counts;
for (auto &el : global_counts) {
counts.emplace_back(el.first, el.second);
}
// Sort in alphabetical order
sort(counts.begin(), counts.end(),
[](const auto &a, const auto &b) {
return a.first < b.first;
});
// Print step
if (rank == 0) {
ofstream fout("hybrid_out.txt");
fout << "Filename: " << argv[1] << ", total words: " << global_total_words << "\n";
// ISAAC is having issues printing too much output, only print the number of unique words
// Error: srun: error: eio_handle_mainloop: Abandoning IO 60 secs after job shutdown initiated
//out << "Unique words found: " << counts.size() << "\n";
for (size_t i = 0; i < counts.size(); ++i) {
fout << "[" << i << "] " << counts[i].first << ": " << counts[i].second << "\n";
}
}
end = MPI_Wtime();
if (rank == 0) {
// Use cerr to always print in terminal
cerr << "Hybrid time: " << (end - start) * 1000 << " ms\n";
cerr << " File read & Map time: " << (start_c - start) * 1000 << " ms\n";
cerr << " Communication time: " << (start_r - start_c + (start_p - start_c2)) * 1000 << " ms\n";
cerr << " Reducing time: " << (start_c2 - start_r) * 1000 << " ms\n";
cerr << " Sort & Print time: " << (end - start_p) * 1000 << " ms\n";
}
omp_destroy_lock(&readers_lock);
omp_destroy_lock(&global_counts_lock);
for (int i = 0; i < num_reducers; ++i) {
omp_destroy_lock(&reducer_locks[i]);
}
// Also release the per-rank mapper locks initialized above
for (int i = 0; i < total_ranks; ++i) {
omp_destroy_lock(&mappers_locks[i]);
}
MPI_Finalize();
return 0;
}