// -*- C++ -*-

// Copyright (C) 2007 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING. If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.

// As a special exception, you may use this file as part of a free
// software library without restriction. Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License. This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.

/** @file parallel/multiway_mergesort.h
 * @brief Parallel multiway merge sort.
 * This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Johannes Singler.

#ifndef _GLIBCXX_PARALLEL_MERGESORT_H
#define _GLIBCXX_PARALLEL_MERGESORT_H 1

#include <vector>

#include <parallel/basic_iterator.h>
#include <bits/stl_algo.h>
#include <parallel/parallel.h>
#include <parallel/multiway_merge.h>
#include <parallel/timing.h>

namespace __gnu_parallel
{

  /** @brief Subsequence description. */
  template<typename _DifferenceTp>
    struct Piece
    {
      typedef _DifferenceTp difference_type;

      /** @brief Begin of subsequence. */
      difference_type begin;

      /** @brief End of subsequence. */
      difference_type end;
    };

  /** @brief Data accessed by all threads.
   *
   * PMWMS = parallel multiway mergesort */
  template<typename RandomAccessIterator>
    struct PMWMSSortingData
    {
      typedef std::iterator_traits<RandomAccessIterator> traits_type;
      typedef typename traits_type::value_type value_type;
      typedef typename traits_type::difference_type difference_type;

      /** @brief Input begin. */
      RandomAccessIterator source;

      /** @brief Start indices, per thread. */
      difference_type* starts;

      /** @brief Temporary arrays for each thread.
       *
       * Indirection allows using the temporary storage in different
       * ways, without code duplication.
       * @see _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST */
      value_type** temporaries;

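      // With _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST set, each thread
      // sorts its chunk in place in the input sequence, merges into
      // its temporary array, and copies the result back at the end.
      // Otherwise, each thread copies its chunk into its temporary
      // array, sorts there, and merges directly back into the input,
      // so no final copy is needed.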
#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
      /** @brief Storage in which to sort. */
      RandomAccessIterator* sorting_places;

      /** @brief Storage into which to merge. */
      value_type** merging_places;
#else
      /** @brief Storage in which to sort. */
      value_type** sorting_places;

      /** @brief Storage into which to merge. */
      RandomAccessIterator* merging_places;
#endif

      /** @brief Samples. */
      value_type* samples;

      /** @brief Offsets to add to the found positions. */
      difference_type* offsets;

      /** @brief Pieces of data to merge @c [thread][sequence]. */
      std::vector<Piece<difference_type> >* pieces;
    };

  /** @brief Thread-local data for PMWMS. */
  template<typename RandomAccessIterator>
    struct PMWMSSorterPU
    {
      /** @brief Total number of threads involved. */
      thread_index_t num_threads;
      /** @brief Number of the owning thread. */
      thread_index_t iam;
      /** @brief Stable sorting desired. */
      bool stable;
      /** @brief Pointer to global data. */
      PMWMSSortingData<RandomAccessIterator>* sd;
    };

  /**
   * @brief Select samples from a sequence.
   * @param d Pointer to thread-local data. The result is placed in
   * @c d->sd->samples.
   * @param num_samples Number of samples to select.
   */
  template<typename RandomAccessIterator, typename _DifferenceTp>
    inline void
    determine_samples(PMWMSSorterPU<RandomAccessIterator>* d,
                      _DifferenceTp& num_samples)
    {
      typedef _DifferenceTp difference_type;

      PMWMSSortingData<RandomAccessIterator>* sd = d->sd;

      num_samples = Settings::sort_mwms_oversampling * d->num_threads - 1;
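      // Illustration (the oversampling factor is an assumed value, not
      // necessarily the library default): with sort_mwms_oversampling
      // == 10 and 4 threads, each thread contributes 10 * 4 - 1 == 39
      // equally spaced samples, 156 in total; after the global sample
      // sort, every 39th sample serves as a splitter between threads.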

      difference_type* es = static_cast<difference_type*>(
        __builtin_alloca(sizeof(difference_type) * (num_samples + 2)));

      equally_split(sd->starts[d->iam + 1] - sd->starts[d->iam],
                    num_samples + 1, es);

      for (difference_type i = 0; i < num_samples; i++)
        sd->samples[d->iam * num_samples + i] =
          sd->source[sd->starts[d->iam] + es[i + 1]];
    }

  /** @brief PMWMS code executed by each thread.
   * @param d Pointer to thread-local data.
   * @param comp Comparator.
   */
  template<typename RandomAccessIterator, typename Comparator>
    inline void
    parallel_sort_mwms_pu(PMWMSSorterPU<RandomAccessIterator>* d,
                          Comparator& comp)
    {
      typedef std::iterator_traits<RandomAccessIterator> traits_type;
      typedef typename traits_type::value_type value_type;
      typedef typename traits_type::difference_type difference_type;

      Timing<sequential_tag> t;
      t.tic();

      PMWMSSortingData<RandomAccessIterator>* sd = d->sd;
      thread_index_t iam = d->iam;

      // Length of this thread's chunk, before merging.
      difference_type length_local = sd->starts[iam + 1] - sd->starts[iam];

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
      typedef RandomAccessIterator SortingPlacesIterator;

      // Sort in input storage.
      sd->sorting_places[iam] = sd->source + sd->starts[iam];
#else
      typedef value_type* SortingPlacesIterator;

      // Sort in temporary storage, leave space for sentinel.
      sd->sorting_places[iam] = sd->temporaries[iam] =
        static_cast<value_type*>(
          ::operator new(sizeof(value_type) * (length_local + 1)));

      // Copy there.
      std::uninitialized_copy(sd->source + sd->starts[iam],
                              sd->source + sd->starts[iam] + length_local,
                              sd->sorting_places[iam]);
#endif

      // Sort locally.
      if (d->stable)
        __gnu_sequential::stable_sort(sd->sorting_places[iam],
                                      sd->sorting_places[iam] + length_local,
                                      comp);
      else
        __gnu_sequential::sort(sd->sorting_places[iam],
                               sd->sorting_places[iam] + length_local,
                               comp);

#if _GLIBCXX_ASSERTIONS
      _GLIBCXX_PARALLEL_ASSERT(is_sorted(sd->sorting_places[iam],
                                         sd->sorting_places[iam]
                                         + length_local, comp));
#endif

      // Invariant: locally sorted subsequence in sd->sorting_places[iam],
      // sd->sorting_places[iam] + length_local.
      t.tic("local sort");

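      // Splitting phase: determine, for every locally sorted run, the
      // boundaries of the pieces each thread will merge. Two strategies
      // are available: SAMPLING picks equally spaced samples from every
      // run and derives approximate piece boundaries from the globally
      // sorted sample array, while EXACT uses multiseq_partition to
      // compute exact global ranks, guaranteeing perfectly balanced
      // merge work.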
      if (Settings::sort_splitting == Settings::SAMPLING)
        {
          difference_type num_samples;
          determine_samples(d, num_samples);

#pragma omp barrier

          t.tic("sample/wait");

#pragma omp single
          __gnu_sequential::sort(sd->samples,
                                 sd->samples
                                 + (num_samples * d->num_threads),
                                 comp);

#pragma omp barrier

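          // For each locally sorted run s, delimit the piece this
          // thread is responsible for: the entries between the global
          // samples of rank num_samples * iam and
          // num_samples * (iam + 1).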
          for (int s = 0; s < d->num_threads; s++)
            {
              // For each sequence.
              if (num_samples * iam > 0)
                sd->pieces[iam][s].begin =
                  std::lower_bound(sd->sorting_places[s],
                                   sd->sorting_places[s]
                                   + sd->starts[s + 1] - sd->starts[s],
                                   sd->samples[num_samples * iam],
                                   comp)
                  - sd->sorting_places[s];
              else
                // Absolute beginning.
                sd->pieces[iam][s].begin = 0;

              if ((num_samples * (iam + 1)) < (num_samples * d->num_threads))
                sd->pieces[iam][s].end =
                  std::lower_bound(sd->sorting_places[s],
                                   sd->sorting_places[s]
                                   + sd->starts[s + 1] - sd->starts[s],
                                   sd->samples[num_samples * (iam + 1)],
                                   comp)
                  - sd->sorting_places[s];
              else
                // Absolute end.
                sd->pieces[iam][s].end = sd->starts[s + 1] - sd->starts[s];
            }
        }
      else if (Settings::sort_splitting == Settings::EXACT)
        {
#pragma omp barrier

          t.tic("wait");

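          // Exact splitting: multiseq_partition computes, across all
          // locally sorted runs simultaneously, split positions whose
          // left parts together contain exactly sd->starts[iam + 1]
          // elements, so every thread merges an equal-sized, disjoint
          // share of the data.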
          std::vector<std::pair<SortingPlacesIterator,
                                SortingPlacesIterator> >
            seqs(d->num_threads);
          for (int s = 0; s < d->num_threads; s++)
            seqs[s] = std::make_pair(sd->sorting_places[s],
                                     sd->sorting_places[s]
                                     + sd->starts[s + 1] - sd->starts[s]);

          std::vector<SortingPlacesIterator> offsets(d->num_threads);

          // If not the last thread.
          if (iam < d->num_threads - 1)
            multiseq_partition(seqs.begin(), seqs.end(),
                               sd->starts[iam + 1], offsets.begin(), comp);

          for (int seq = 0; seq < d->num_threads; seq++)
            {
              // For each sequence.
              if (iam < (d->num_threads - 1))
                sd->pieces[iam][seq].end = offsets[seq] - seqs[seq].first;
              else
                // Absolute end of this sequence.
                sd->pieces[iam][seq].end =
                  sd->starts[seq + 1] - sd->starts[seq];
            }

#pragma omp barrier

          for (int seq = 0; seq < d->num_threads; seq++)
            {
              // For each sequence.
              if (iam > 0)
                sd->pieces[iam][seq].begin = sd->pieces[iam - 1][seq].end;
              else
                // Absolute beginning.
                sd->pieces[iam][seq].begin = 0;
            }
        }

      t.tic("split");

      // Offset from target begin, length after merging.
      difference_type offset = 0, length_am = 0;
      for (int s = 0; s < d->num_threads; s++)
        {
          length_am += sd->pieces[iam][s].end - sd->pieces[iam][s].begin;
          offset += sd->pieces[iam][s].begin;
        }

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
      // Merge into temporary storage. Uninitialized creation is not
      // possible here, since no multiway_merge variant calls placement
      // new instead of the assignment operator.
      sd->merging_places[iam] = sd->temporaries[iam] =
        static_cast<value_type*>(
          ::operator new(sizeof(value_type) * length_am));
#else
      // Merge directly into the target.
      sd->merging_places[iam] = sd->source + offset;
#endif
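
      // Gather the pieces this thread must merge, one from each locally
      // sorted run. Pieces of different threads are disjoint and their
      // target ranges do not overlap, so all threads can merge
      // concurrently without further synchronization.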
      std::vector<std::pair<SortingPlacesIterator,
                            SortingPlacesIterator> >
        seqs(d->num_threads);

      for (int s = 0; s < d->num_threads; s++)
        {
          seqs[s] = std::make_pair(sd->sorting_places[s]
                                   + sd->pieces[iam][s].begin,
                                   sd->sorting_places[s]
                                   + sd->pieces[iam][s].end);

#if _GLIBCXX_ASSERTIONS
          _GLIBCXX_PARALLEL_ASSERT(is_sorted(seqs[s].first, seqs[s].second,
                                             comp));
#endif
        }

      multiway_merge(seqs.begin(), seqs.end(), sd->merging_places[iam],
                     comp, length_am, d->stable, false, sequential_tag());

      t.tic("merge");

#if _GLIBCXX_ASSERTIONS
      _GLIBCXX_PARALLEL_ASSERT(is_sorted(sd->merging_places[iam],
                                         sd->merging_places[iam] + length_am,
                                         comp));
#endif

#pragma omp barrier

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
      // Write back.
      std::copy(sd->merging_places[iam],
                sd->merging_places[iam] + length_am,
                sd->source + offset);
#endif

      // The temporary storage was obtained through ::operator new, so
      // it must be released through ::operator delete, not delete[].
      ::operator delete(sd->temporaries[iam]);

      t.tic("copy back");

      t.print();
    }

  /** @brief PMWMS main call.
   * @param begin Begin iterator of sequence.
   * @param end End iterator of sequence.
   * @param comp Comparator.
   * @param n Length of sequence.
   * @param num_threads Number of threads to use.
   * @param stable Stable sorting.
   */
  template<typename RandomAccessIterator, typename Comparator>
    inline void
    parallel_sort_mwms(RandomAccessIterator begin, RandomAccessIterator end,
                       Comparator comp,
                       typename std::iterator_traits<RandomAccessIterator>
                         ::difference_type n,
                       int num_threads, bool stable)
    {
      _GLIBCXX_CALL(n)

      typedef std::iterator_traits<RandomAccessIterator> traits_type;
      typedef typename traits_type::value_type value_type;
      typedef typename traits_type::difference_type difference_type;

      if (n <= 1)
        return;

      // At least one element per thread.
      if (num_threads > n)
        num_threads = static_cast<thread_index_t>(n);

      PMWMSSortingData<RandomAccessIterator> sd;

      sd.source = begin;
      sd.temporaries = new value_type*[num_threads];

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
      sd.sorting_places = new RandomAccessIterator[num_threads];
      sd.merging_places = new value_type*[num_threads];
#else
      sd.sorting_places = new value_type*[num_threads];
      sd.merging_places = new RandomAccessIterator[num_threads];
#endif

      if (Settings::sort_splitting == Settings::SAMPLING)
        {
          unsigned int sz =
            Settings::sort_mwms_oversampling * num_threads - 1;
          sz *= num_threads;

          // Equivalent to value_type[sz], without the need for default
          // construction.
          sz *= sizeof(value_type);
          sd.samples = static_cast<value_type*>(::operator new(sz));
        }
      else
        sd.samples = NULL;

      sd.offsets = new difference_type[num_threads - 1];
      sd.pieces = new std::vector<Piece<difference_type> >[num_threads];
      for (int s = 0; s < num_threads; s++)
        sd.pieces[s].resize(num_threads);
      PMWMSSorterPU<RandomAccessIterator>* pus =
        new PMWMSSorterPU<RandomAccessIterator>[num_threads];
      difference_type* starts = sd.starts =
        new difference_type[num_threads + 1];

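      // Split the input into num_threads chunks whose lengths differ by
      // at most one. For illustration: with n == 10 and num_threads == 4,
      // chunk_length == 2 and split == 2, yielding
      // starts == {0, 3, 6, 8, 10}.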
      difference_type chunk_length = n / num_threads;
      difference_type split = n % num_threads;
      difference_type start = 0;
      for (int i = 0; i < num_threads; i++)
        {
          starts[i] = start;
          start += (i < split) ? (chunk_length + 1) : chunk_length;
          pus[i].num_threads = num_threads;
          pus[i].iam = i;
          pus[i].sd = &sd;
          pus[i].stable = stable;
        }
      starts[num_threads] = start;

      // Now sort in parallel.
#pragma omp parallel num_threads(num_threads)
      parallel_sort_mwms_pu(&(pus[omp_get_thread_num()]), comp);

      // XXX sd as RAII
      delete[] starts;
      delete[] sd.temporaries;
      delete[] sd.sorting_places;
      delete[] sd.merging_places;

      if (Settings::sort_splitting == Settings::SAMPLING)
        // The sample array was obtained through ::operator new, so it
        // must be released through ::operator delete, not delete[].
        ::operator delete(sd.samples);

      delete[] sd.offsets;
      delete[] sd.pieces;

      delete[] pus;
    }
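
  // A minimal usage sketch, for illustration only (not part of the
  // original header). It assumes an OpenMP-enabled build; all names
  // used are declared above or in <omp.h>.
  //
  //   std::vector<int> v;
  //   // ... fill v ...
  //   __gnu_parallel::parallel_sort_mwms(v.begin(), v.end(),
  //                                      std::less<int>(), v.size(),
  //                                      omp_get_max_threads(),
  //                                      /*stable*/ false);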

}

#endif