multiway_merge.h: Removed Timing
// -*- C++ -*-

// Copyright (C) 2007 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING. If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.

// As a special exception, you may use this file as part of a free
// software library without restriction. Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License. This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.

/** @file parallel/multiway_mergesort.h
 *  @brief Parallel multiway merge sort.
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Johannes Singler.

#ifndef _GLIBCXX_PARALLEL_MERGESORT_H
#define _GLIBCXX_PARALLEL_MERGESORT_H 1

#include <vector>

#include <parallel/basic_iterator.h>
#include <bits/stl_algo.h>
#include <parallel/parallel.h>
#include <parallel/multiway_merge.h>

namespace __gnu_parallel
{

/** @brief Subsequence description. */
template<typename _DifferenceTp>
  struct Piece
  {
    typedef _DifferenceTp difference_type;

    /** @brief Beginning of subsequence. */
    difference_type begin;

    /** @brief End of subsequence. */
    difference_type end;
  };

/** @brief Data accessed by all threads.
 *
 *  PMWMS = parallel multiway mergesort */
template<typename RandomAccessIterator>
  struct PMWMSSortingData
  {
    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef typename traits_type::difference_type difference_type;

    /** @brief Beginning of the input sequence. */
    RandomAccessIterator source;

    /** @brief Start indices, per thread. */
    difference_type* starts;

    /** @brief Temporary arrays for each thread.
     *
     *  Indirection allows using the temporary storage in different
     *  ways, without code duplication.
     *  @see _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST */
    value_type** temporaries;

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    /** @brief Storage in which to sort. */
    RandomAccessIterator* sorting_places;

    /** @brief Storage into which to merge. */
    value_type** merging_places;
#else
    /** @brief Storage in which to sort. */
    value_type** sorting_places;

    /** @brief Storage into which to merge. */
    RandomAccessIterator* merging_places;
#endif
    /** @brief Samples. */
    value_type* samples;

    /** @brief Offsets to add to the found positions. */
    difference_type* offsets;

    /** @brief Pieces of data to merge, @c [thread][sequence] */
    std::vector<Piece<difference_type> >* pieces;
  };
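
// Illustration (hypothetical values, not from the algorithm itself):
// pieces is in effect a num_threads x num_threads matrix, where
// pieces[iam][s] names the half-open range [begin, end) of thread
// s's locally sorted run that thread iam will merge. A hypothetical
// helper to compute a piece's length would be:
//
//   template<typename _DifferenceTp>
//     _DifferenceTp
//     piece_length(const Piece<_DifferenceTp>& p)
//     { return p.end - p.begin; }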

/** @brief Thread-local data for PMWMS. */
template<typename RandomAccessIterator>
  struct PMWMSSorterPU
  {
    /** @brief Total number of threads involved. */
    thread_index_t num_threads;
    /** @brief Number of the owning thread. */
    thread_index_t iam;
    /** @brief Stable sorting desired. */
    bool stable;
    /** @brief Pointer to global data. */
    PMWMSSortingData<RandomAccessIterator>* sd;
  };

/**
 *  @brief Select samples from a sequence.
 *  @param d Pointer to thread-local data. The results will be placed
 *  in @c d->sd->samples.
 *  @param num_samples Number of samples to select.
 */
template<typename RandomAccessIterator, typename _DifferenceTp>
  inline void
  determine_samples(PMWMSSorterPU<RandomAccessIterator>* d,
                    _DifferenceTp& num_samples)
  {
    typedef _DifferenceTp difference_type;

    PMWMSSortingData<RandomAccessIterator>* sd = d->sd;

    num_samples = Settings::sort_mwms_oversampling * d->num_threads - 1;

    difference_type* es = static_cast<difference_type*>(
      __builtin_alloca(sizeof(difference_type) * (num_samples + 2)));

    equally_split(sd->starts[d->iam + 1] - sd->starts[d->iam],
                  num_samples + 1, es);

    for (difference_type i = 0; i < num_samples; i++)
      sd->samples[d->iam * num_samples + i] =
        sd->source[sd->starts[d->iam] + es[i + 1]];
  }
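
// Worked example (illustrative numbers only): with num_threads = 4
// and Settings::sort_mwms_oversampling = 10, each thread picks
// num_samples = 10 * 4 - 1 = 39 roughly equally spaced elements from
// its chunk, so the shared samples array holds 39 * 4 = 156 elements.
// After the single-threaded sort of all samples, the splitter
// separating thread iam's output from its predecessor's is
// samples[num_samples * iam], i.e. every 39th sorted sample.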

/** @brief PMWMS code executed by each thread.
 *  @param d Pointer to thread-local data.
 *  @param comp Comparator.
 */
template<typename RandomAccessIterator, typename Comparator>
  inline void
  parallel_sort_mwms_pu(PMWMSSorterPU<RandomAccessIterator>* d,
                        Comparator& comp)
  {
    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef typename traits_type::difference_type difference_type;

    PMWMSSortingData<RandomAccessIterator>* sd = d->sd;
    thread_index_t iam = d->iam;

    // Length of this thread's chunk, before merging.
    difference_type length_local = sd->starts[iam + 1] - sd->starts[iam];

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    typedef RandomAccessIterator SortingPlacesIterator;

    // Sort in input storage.
    sd->sorting_places[iam] = sd->source + sd->starts[iam];
#else
    typedef value_type* SortingPlacesIterator;

    // Sort in temporary storage; leave space for the sentinel.
    sd->sorting_places[iam] = sd->temporaries[iam] =
      static_cast<value_type*>(
        ::operator new(sizeof(value_type) * (length_local + 1)));

    // Copy the input there.
    std::uninitialized_copy(sd->source + sd->starts[iam],
                            sd->source + sd->starts[iam] + length_local,
                            sd->sorting_places[iam]);
#endif

    // Sort locally.
    if (d->stable)
      __gnu_sequential::stable_sort(sd->sorting_places[iam],
                                    sd->sorting_places[iam] + length_local,
                                    comp);
    else
      __gnu_sequential::sort(sd->sorting_places[iam],
                             sd->sorting_places[iam] + length_local,
                             comp);

#if _GLIBCXX_ASSERTIONS
    _GLIBCXX_PARALLEL_ASSERT(is_sorted(sd->sorting_places[iam],
                                       sd->sorting_places[iam] + length_local,
                                       comp));
#endif

    // Invariant: locally sorted subsequence in
    // [sd->sorting_places[iam], sd->sorting_places[iam] + length_local).

    if (Settings::sort_splitting == Settings::SAMPLING)
      {
        difference_type num_samples;
        determine_samples(d, num_samples);

#pragma omp barrier

#pragma omp single
        __gnu_sequential::sort(sd->samples,
                               sd->samples + (num_samples * d->num_threads),
                               comp);

#pragma omp barrier

        for (int s = 0; s < d->num_threads; s++)
          {
            // For each sequence.
            if (num_samples * iam > 0)
              sd->pieces[iam][s].begin =
                std::lower_bound(sd->sorting_places[s],
                                 sd->sorting_places[s]
                                   + sd->starts[s + 1] - sd->starts[s],
                                 sd->samples[num_samples * iam],
                                 comp)
                - sd->sorting_places[s];
            else
              // Absolute beginning.
              sd->pieces[iam][s].begin = 0;

            if ((num_samples * (iam + 1)) < (num_samples * d->num_threads))
              sd->pieces[iam][s].end =
                std::lower_bound(sd->sorting_places[s],
                                 sd->sorting_places[s]
                                   + sd->starts[s + 1] - sd->starts[s],
                                 sd->samples[num_samples * (iam + 1)],
                                 comp)
                - sd->sorting_places[s];
            else
              // Absolute end.
              sd->pieces[iam][s].end = sd->starts[s + 1] - sd->starts[s];
          }
      }
    else if (Settings::sort_splitting == Settings::EXACT)
      {
#pragma omp barrier

        std::vector<std::pair<SortingPlacesIterator, SortingPlacesIterator> >
          seqs(d->num_threads);
        for (int s = 0; s < d->num_threads; s++)
          seqs[s] = std::make_pair(sd->sorting_places[s],
                                   sd->sorting_places[s]
                                     + sd->starts[s + 1] - sd->starts[s]);

        std::vector<SortingPlacesIterator> offsets(d->num_threads);

        // If not the last thread.
        if (iam < d->num_threads - 1)
          multiseq_partition(seqs.begin(), seqs.end(), sd->starts[iam + 1],
                             offsets.begin(), comp);

        for (int seq = 0; seq < d->num_threads; seq++)
          {
            // For each sequence.
            if (iam < (d->num_threads - 1))
              sd->pieces[iam][seq].end = offsets[seq] - seqs[seq].first;
            else
              // Absolute end of this sequence.
              sd->pieces[iam][seq].end = sd->starts[seq + 1] - sd->starts[seq];
          }

#pragma omp barrier

        for (int seq = 0; seq < d->num_threads; seq++)
          {
            // For each sequence.
            if (iam > 0)
              sd->pieces[iam][seq].begin = sd->pieces[iam - 1][seq].end;
            else
              // Absolute beginning.
              sd->pieces[iam][seq].begin = 0;
          }
      }
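
    // Worked example of the splitting invariant (hypothetical values):
    // with num_threads = 2 and locally sorted runs {1, 4, 9} (thread 0)
    // and {2, 3, 8} (thread 1), thread 0 partitions at global rank
    // starts[1] = 3, yielding
    //   pieces[0][0] = [0, 1) -> {1}      pieces[0][1] = [0, 2) -> {2, 3}
    //   pieces[1][0] = [1, 3) -> {4, 9}   pieces[1][1] = [2, 3) -> {8}
    // Thread 0 merges {1} with {2, 3} into output positions 0..2,
    // thread 1 merges {4, 9} with {8} into positions 3..5, so the
    // concatenation of the merged blocks is globally sorted.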

    // Offset of this thread's block from the beginning of the target,
    // and its length after merging.
    difference_type offset = 0, length_am = 0;
    for (int s = 0; s < d->num_threads; s++)
      {
        length_am += sd->pieces[iam][s].end - sd->pieces[iam][s].begin;
        offset += sd->pieces[iam][s].begin;
      }

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    // Merge to temporary storage. Uninitialized creation is not
    // possible, since no multiway_merge variant calls the placement
    // new instead of the assignment operator.
    sd->merging_places[iam] = sd->temporaries[iam] =
      static_cast<value_type*>(::operator new(sizeof(value_type) * length_am));
#else
    // Merge directly to the target.
    sd->merging_places[iam] = sd->source + offset;
#endif
    std::vector<std::pair<SortingPlacesIterator, SortingPlacesIterator> >
      seqs(d->num_threads);

    for (int s = 0; s < d->num_threads; s++)
      {
        seqs[s] =
          std::make_pair(sd->sorting_places[s] + sd->pieces[iam][s].begin,
                         sd->sorting_places[s] + sd->pieces[iam][s].end);

#if _GLIBCXX_ASSERTIONS
        _GLIBCXX_PARALLEL_ASSERT(is_sorted(seqs[s].first, seqs[s].second,
                                           comp));
#endif
      }

    multiway_merge(seqs.begin(), seqs.end(), sd->merging_places[iam], comp,
                   length_am, d->stable, false, sequential_tag());

#if _GLIBCXX_ASSERTIONS
    _GLIBCXX_PARALLEL_ASSERT(is_sorted(sd->merging_places[iam],
                                       sd->merging_places[iam] + length_am,
                                       comp));
#endif

#pragma omp barrier

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    // Write back.
    std::copy(sd->merging_places[iam], sd->merging_places[iam] + length_am,
              sd->source + offset);
#endif

    // The temporary buffer was obtained through ::operator new, so
    // destroy its elements and release the raw memory with
    // ::operator delete; delete[] would be mismatched here.
#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    for (difference_type i = 0; i < length_am; i++)
      sd->temporaries[iam][i].~value_type();
#else
    for (difference_type i = 0; i < length_local; i++)
      sd->temporaries[iam][i].~value_type();
#endif
    ::operator delete(sd->temporaries[iam]);
  }

/** @brief PMWMS main call.
 *  @param begin Begin iterator of sequence.
 *  @param end End iterator of sequence.
 *  @param comp Comparator.
 *  @param n Length of sequence.
 *  @param num_threads Number of threads to use.
 *  @param stable Stable sorting.
 */
template<typename RandomAccessIterator, typename Comparator>
  inline void
  parallel_sort_mwms(RandomAccessIterator begin, RandomAccessIterator end,
                     Comparator comp,
                     typename std::iterator_traits<RandomAccessIterator>::difference_type n,
                     int num_threads, bool stable)
  {
    _GLIBCXX_CALL(n)

    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef typename traits_type::difference_type difference_type;

    if (n <= 1)
      return;

    // At least one element per thread.
    if (num_threads > n)
      num_threads = static_cast<thread_index_t>(n);

    PMWMSSortingData<RandomAccessIterator> sd;

    sd.source = begin;
    sd.temporaries = new value_type*[num_threads];

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    sd.sorting_places = new RandomAccessIterator[num_threads];
    sd.merging_places = new value_type*[num_threads];
#else
    sd.sorting_places = new value_type*[num_threads];
    sd.merging_places = new RandomAccessIterator[num_threads];
#endif

    if (Settings::sort_splitting == Settings::SAMPLING)
      {
        unsigned int sz = Settings::sort_mwms_oversampling * num_threads - 1;
        sz *= num_threads;

        // Raw storage for sz objects of value_type, obtained without
        // default construction.
        sz *= sizeof(value_type);
        sd.samples = static_cast<value_type*>(::operator new(sz));
      }
    else
      sd.samples = NULL;

    sd.offsets = new difference_type[num_threads - 1];
    sd.pieces = new std::vector<Piece<difference_type> >[num_threads];
    for (int s = 0; s < num_threads; s++)
      sd.pieces[s].resize(num_threads);
    PMWMSSorterPU<RandomAccessIterator>* pus =
      new PMWMSSorterPU<RandomAccessIterator>[num_threads];
    difference_type* starts = sd.starts = new difference_type[num_threads + 1];

    difference_type chunk_length = n / num_threads;
    difference_type split = n % num_threads;
    difference_type start = 0;
    for (int i = 0; i < num_threads; i++)
      {
        starts[i] = start;
        start += (i < split) ? (chunk_length + 1) : chunk_length;
        pus[i].num_threads = num_threads;
        pus[i].iam = i;
        pus[i].sd = &sd;
        pus[i].stable = stable;
      }
    starts[num_threads] = start;
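
    // Worked example (hypothetical values): for n = 10 and
    // num_threads = 4, chunk_length = 2 and split = 2, so the first
    // two threads receive chunks of length 3 and the last two chunks
    // of length 2, giving starts = {0, 3, 6, 8, 10}. Thread i always
    // works on [starts[i], starts[i + 1]).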

    // Now sort in parallel.
#pragma omp parallel num_threads(num_threads)
    parallel_sort_mwms_pu(&(pus[omp_get_thread_num()]), comp);

    // XXX sd as RAII
    delete[] starts;
    delete[] sd.temporaries;
    delete[] sd.sorting_places;
    delete[] sd.merging_places;

    if (Settings::sort_splitting == Settings::SAMPLING)
      // The sample buffer is raw memory from ::operator new, so it
      // must be released with ::operator delete, not delete[].
      ::operator delete(sd.samples);

    delete[] sd.offsets;
    delete[] sd.pieces;

    delete[] pus;
  }
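
// Usage sketch (illustrative only; within libstdc++ this routine is
// normally reached through the parallel sort dispatch rather than
// called directly). Assuming OpenMP is enabled:
//
//   #include <vector>
//   #include <functional>
//   #include <omp.h>
//
//   std::vector<int> v = /* ... */;
//   __gnu_parallel::parallel_sort_mwms(v.begin(), v.end(),
//                                      std::less<int>(), v.size(),
//                                      omp_get_max_threads(),
//                                      /* stable = */ false);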

}

#endif