template <class InputIterator, class OutputIterator, class ReduceOp>
bool reduce(Communicate& comm, InputIterator s1, InputIterator s2,
            OutputIterator t1, const ReduceOp& op, bool *IncludeVal)
{
    // determine the destination (parent) node and the reduction message tags
    int parent = 0;
    int sendtag = comm.next_tag(COMM_REDUCE_SEND_TAG, COMM_REDUCE_CYCLE);
    // ...

    // count the elements in the input range
    unsigned int elements = 0;
    for (InputIterator tmps1 = s1; tmps1 != s2; ++tmps1, ++elements);
    // ...

    // if no mask was supplied, build one that includes every element
    bool *useFlags = IncludeVal;
    if (useFlags == 0) {
        useFlags = new bool[elements];
        for (unsigned int u=0; u < elements; useFlags[u++] = true);
    }

    if ( comm.myNode() != parent ) {
        // child node: ship the local values and flags to the parent ...
        // ...
        if ( ! comm.send(msg, parent, sendtag) )
            // ...
        // ... then wait for the reduced results to come back
        if ( ! msg || msg->size() < 1 )
            Ippl::abort("reduce: cannot receive reduce results.");
        // ...
    } else {
        // parent node: copy the local values into the output range ...
        InputIterator tmp1 = s1;
        OutputIterator t2 = t1;
        bool* copyf = useFlags;
        for ( ; tmp1 != s2; ++tmp1, ++t2, ++copyf)
            // ...

        // ... then fold in the contribution of every other node
        int notReceived = comm.getNodes() - 1;
        while (notReceived > 0) {
            // ...
            if ( ! recmsg || recmsg->size() < 1 )
                Ippl::abort("reduce: cannot receive reduce buffers.");
            // ...
            if ((unsigned int) recelems != elements)
                Ippl::abort("reduce: mismatched element count in vector reduction.");
            // ...
            InputIterator reci = (InputIterator)(recmsg->item(0).data());
            bool *recflag = (bool *)(recmsg->item(1).data());
            // ...
            // apply the reduction operation element by element
            for (u=0, t2=t1; u < elements; ++t2, ++reci, ++u)
                // ...
        }

        // broadcast the reduced values back to the other nodes
        // ...
            Ippl::abort("reduce: cannot send reduce results.");
        // ...
    }

    // clean up the mask if it was allocated here
    if (useFlags != 0 && useFlags != IncludeVal)
        delete [] useFlags;
    // ...
}
template <class InputIterator, class OutputIterator, class ReduceOp>
bool reduce(InputIterator s1, InputIterator s2,
            OutputIterator t1, const ReduceOp& op, bool *IncludeVal)
{
    // overload without an explicit Communicate; uses the default Communicate object
    // ...
}
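A minimal usage sketch for the iterator-based overload above. The PETE functor OpAddAssign and the element count are assumptions for illustration; a null IncludeVal mask includes every node's values, as the mask-building code above shows.

    // hypothetical example: sum a 3-element array across all nodes
    // (OpAddAssign is assumed to be an available PETE accumulation functor)
    double local[3]  = {1.0, 2.0, 3.0};
    double summed[3] = {0.0, 0.0, 0.0};
    bool ok = reduce(local, local + 3, summed, OpAddAssign(),
                     static_cast<bool*>(0));   // null mask: include all values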
// scalar version of reduce: reduce a single value, including it in the
// result only on the nodes where IncludeVal is true
template <class T, class ReduceOp>
bool reduce_masked(Communicate& comm, T& input, T& output,
                   const ReduceOp& op, bool IncludeVal)
{
    // determine the parent node and the reduction message tags
    // ...
    if (comm.myNode() != parent) {
        // child node: send the local value and its include-flag to the parent ...
        // ...
        if ( ! comm.send(msg, parent, sendtag) )
            // ...
        // ... then wait for the reduced result
        if ( ! msg || msg->size() < 1 )
            Ippl::abort("reduce: cannot receive reduce results.");
        // ...
    } else {
        // parent node: collect one value and one flag from every other node
        int notReceived = comm.getNodes() - 1;
        // ...
        T *recval = new T[notReceived];
        bool *recflag = new bool[notReceived];
        // ...
        while (notReceived > 0) {
            // ...
            if ( ! recmsg || recmsg->size() < 1 )
                Ippl::abort("reduce: cannot receive reduce buffers.");
            // ...
            if (recflag[fromnode - 1])
                // ...
        }
        // ...
        output = recval[n-1];
        // ...
        // broadcast the result back to the other nodes
        // ...
            Ippl::abort("reduce: cannot send reduce results.");
        // ...
    }
    // ...
}
template <class T, class ReduceOp>
// ...
// scatter: send each element of [s1, s2) to the node given by target_node,
// placing it at the index given by target_position and combining it with op
template <class InputIterator, class RandomIterator, class ScatterOp>
bool scatter(Communicate& comm, InputIterator s1, InputIterator s2,
             RandomIterator t1, int *target_node,
             int *target_position, const ScatterOp& op)
{
    // ...
    // pack each local element into a message for its destination node
    int *tn = target_node;
    int *tp = target_position;
    // ...
    for ( si = s1; si != s2 ; si++, tn++, tp++ ) {
        // ...
        if ( *tn < 0 || *tn >= comm.getNodes() ) {
            ERRORMSG("scatter: bad scatter target " << *tn << endl);
            // ...
        }
        // ...
    }

    // send one message to every node, including this one
    for ( i = comm.getNodes() - 1; i >= 0; i-- ) {
        // ...
        if ( ! comm.send(msg + i, i, tag, false) ) {
            ERRORMSG("scatter: cannot send scatter buffer " << i << endl);
            // ...
        }
        // ...
    }

    // receive the (position, value) pairs sent to this node and apply op
    while (notReceived > 0) {
        // ...
            ERRORMSG("scatter: cannot receive scatter message." << endl);
        // ...
        int pairs = recmsg->size() / 2;
        // ...
        for ( i = 0 ; i < pairs ; i++ ) {
            // ...
            reci = (InputIterator)(recmsg->item(0).data());
            // ...
        }
        // ...
        if ( fromnode != comm.myNode() )
            // ...
    }
    // ...
}
template <class InputIterator, class RandomIterator, class ScatterOp>
bool scatter(InputIterator s1, InputIterator s2,
             RandomIterator t1, int *target_node,
             int *target_position, const ScatterOp& op)
{
    // overload without an explicit Communicate; uses the default Communicate object
    // ...
}
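A sketch of how the scatter above might be called, assuming OpAssign is an available PETE assignment functor and that each source element is delivered to slot target_position[i] of the target buffer on node target_node[i]; the names and sizes are illustrative only.

    // hypothetical example: place two local values into chosen slots on chosen nodes
    double vals[2]    = {3.14, 2.72};
    int    nodes[2]   = {0, 1};      // destination node for each value
    int    slots[2]   = {4, 7};      // index in that node's target buffer
    double target[16] = {0.0};
    bool ok = scatter(vals, vals + 2, target, nodes, slots, OpAssign());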
// gather: collect 'count' elements from every node into 'output' on node 'root'
template <typename T>
void gather(const T* input, T* output, int count, int root) {
    MPI_Datatype type = get_mpi_datatype<T>(*input);

    MPI_Gather(const_cast<T*>(input), count, type,
               output, count, type, root, Ippl::getComm());
}
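For reference, the wrapper above amounts to a plain MPI_Gather over the IPPL communicator. The following standalone sketch, independent of IPPL and using MPI_COMM_WORLD purely for illustration, shows the same collective pattern.

    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int rank = 0, size = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        double local = static_cast<double>(rank);   // one value per rank
        std::vector<double> all(size);              // only read on the root
        MPI_Gather(&local, 1, MPI_DOUBLE,           // send buffer
                   all.data(), 1, MPI_DOUBLE,       // receive buffer (root only)
                   0, MPI_COMM_WORLD);              // rank 0 collects

        if (rank == 0)
            for (int i = 0; i < size; ++i)
                std::printf("value from rank %d: %g\n", i, all[i]);

        MPI_Finalize();
        return 0;
    }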
// scatter: distribute 'count' elements per node from 'input' on node 'root'
template <typename T>
void scatter(const T* input, T* output, int count, int root) {
    MPI_Datatype type = get_mpi_datatype<T>(*input);

    MPI_Scatter(const_cast<T*>(input), count, type,
                output, count, type, root, Ippl::getComm());
}
// reduce: combine 'count' elements elementwise across nodes with 'op';
// the result is placed in 'output' on node 'root'
template <typename T, class Op>
void reduce(const T* input, T* output, int count, Op op, int root) {
    MPI_Datatype type = get_mpi_datatype<T>(*input);

    MPI_Op mpiOp = get_mpi_op<Op>(op);

    MPI_Reduce(const_cast<T*>(input), output, count, type,
               mpiOp, root, Ippl::getComm());
}
template <typename T, class Op>
void new_reduce(const T* input, T* output, int count, Op op, int root) {
    MPI_Datatype type = get_mpi_datatype<T>(*input);

    MPI_Op mpiOp = get_mpi_op<Op>(op);

    MPI_Reduce(const_cast<T*>(input), output, count, type,
               mpiOp, root, Ippl::getComm());
}
// in-place variant: the buffer holds the local values on entry and,
// on the root node, the reduced values on return
template <typename T, class Op>
void new_reduce(T* inout, int count, Op op, int root) {
    MPI_Datatype type = get_mpi_datatype<T>(*inout);

    MPI_Op mpiOp = get_mpi_op<Op>(op);

    if (Ippl::myNode() == root) {
        // only the root may pass MPI_IN_PLACE as the send buffer
        MPI_Reduce(MPI_IN_PLACE, inout, count, type, mpiOp, root, Ippl::getComm());
    } else {
        // on non-root ranks the receive buffer is ignored
        MPI_Reduce(inout, inout, count, type, mpiOp, root, Ippl::getComm());
    }
}
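The branch above follows the standard MPI_IN_PLACE convention: only the root passes MPI_IN_PLACE as the send buffer, while non-root ranks pass their data normally and their receive buffer is not significant. A self-contained sketch of the same pattern, outside IPPL and using MPI_COMM_WORLD for illustration:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        const int root = 0;
        double value = rank + 1.0;   // local contribution; reduced in place on the root

        if (rank == root)
            MPI_Reduce(MPI_IN_PLACE, &value, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
        else
            MPI_Reduce(&value, nullptr, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);

        if (rank == root)
            std::printf("sum of contributions: %g\n", value);

        MPI_Finalize();
        return 0;
    }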
// convenience overload taking references instead of pointers
template <typename T, class Op>
void reduce(const T& input, T& output, int count, Op op, int root) {
    reduce(&input, &output, count, op, root);
}
// allreduce: like reduce, but every node receives the combined result
template <typename T, class Op>
void allreduce(const T* input, T* output, int count, Op op) {
    MPI_Datatype type = get_mpi_datatype<T>(*input);

    MPI_Op mpiOp = get_mpi_op<Op>(op);

    MPI_Allreduce(const_cast<T*>(input), output, count, type,
                  mpiOp, Ippl::getComm());
}
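A possible call to the allreduce wrapper above. Whether get_mpi_op accepts std::plus<double> depends on the operation traits defined elsewhere in this code base, so treat the functor choice here as an assumption.

    // hypothetical example: every node ends up with the global sum
    double local  = 1.5;   // this node's contribution (illustrative value)
    double global = 0.0;
    allreduce(&local, &global, 1, std::plus<double>());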
template <typename T, class Op>
// ...
// in-place allreduce: every node's buffer holds the combined result on return
template <typename T, class Op>
void allreduce(T* inout, int count, Op op) {
    MPI_Datatype type = get_mpi_datatype<T>(*inout);

    MPI_Op mpiOp = get_mpi_op<Op>(op);

    MPI_Allreduce(MPI_IN_PLACE, inout, count, type, mpiOp, Ippl::getComm());
}
template <typename T, class Op>
// ...