SELF_CHECK (counter == NUMBER);
#undef NUMBER
+
+ /* Check that if there are fewer tasks than threads, then we won't
+ end up with a null result. */
+ std::vector<std::unique_ptr<int>> intresults;
+ std::atomic<bool> any_empty_tasks (false);
+
+ FOR_EACH (1, 0, 1,
+ [&] (int start, int end)
+ {
+ if (start == end)
+ any_empty_tasks = true;
+ return std::unique_ptr<int> (new int (end - start));
+ });
+ SELF_CHECK (!any_empty_tasks);
+ SELF_CHECK (std::all_of (intresults.begin (),
+ intresults.end (),
+ [] (const std::unique_ptr<int> &entry)
+ {
+ return entry != nullptr;
+ }));
+
+ /* The same but using the task size parameter. */
+ intresults.clear ();
+ any_empty_tasks = false;
+ FOR_EACH (1, 0, 1,
+ [&] (int start, int end)
+ {
+ if (start == end)
+ any_empty_tasks = true;
+ return std::unique_ptr<int> (new int (end - start));
+ },
+ task_size_one);
+ SELF_CHECK (!any_empty_tasks);
+ SELF_CHECK (std::all_of (intresults.begin (),
+ intresults.end (),
+ [] (const std::unique_ptr<int> &entry)
+ {
+ return entry != nullptr;
+ }));
}
#endif /* FOR_EACH */
return result;
}
+ /* Resize the results to N.  Shrinking drops any futures past the
+ first N -- used when fewer tasks than planned are actually
+ submitted, so the result list holds no empty/null entries.  */
+ void resize (size_t n)
+ {
+ m_futures.resize (n);
+ }
+
private:
/* A vector of futures coming from the tasks run in the
}
}
+ /* Resize the results to N.  Shrinking discards futures beyond the
+ first N; the caller uses this to truncate the list when the last
+ batch of work is handled on the calling thread instead of being
+ submitted to the pool.  */
+ void resize (size_t n)
+ {
+ m_futures.resize (n);
+ }
+
private:
std::vector<gdb::future<void>> m_futures;
end = j;
remaining_size -= chunk_size;
}
+
+ /* This case means we don't have enough elements to really
+ distribute them. Rather than ever submit a task that does
+ nothing, we short-circuit here. */
+ if (first == end)
+ end = last;
+
+ if (end == last)
+ {
+ /* We're about to dispatch the last batch of elements, which
+ we normally process in the main thread. So just truncate
+ the result list here. This avoids submitting empty tasks
+ to the thread pool. */
+ count = i;
+ results.resize (count);
+ break;
+ }
+
if (parallel_for_each_debug)
{
debug_printf (_("Parallel for: elements on worker thread %i\t: %zu"),