OPAL (Object Oriented Parallel Accelerator Library) 2.2.0 — Pilot.h
#ifndef __PILOT_H__
#define __PILOT_H__

#include <mpi.h>
#include <cstring>
#include <iostream>
#include <string>
#include <unistd.h>

#include "boost/smart_ptr.hpp"
//#include "boost/dynamic_bitset.hpp"

#include "Comm/MasterNode.h"
#include "Comm/CommSplitter.h"

#include "Util/Types.h"
#include "Util/CmdArguments.h"
#include "Util/OptPilotException.h"

#include "Pilot/Poller.h"
#include "Pilot/Worker.h"
#include "Optimizer/Optimizer.h"

#include "Util/Trace/Trace.h"
#include "Util/Trace/FileSink.h"

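// Typical instantiation (illustrative sketch only; the concrete type names
// below are assumptions modeled on OPAL's optimizer setup and are not part
// of this header):
//
//   typedef OpalSimulation Sim_t;
//   typedef FixedPisaNsga2< CrossoverOp, MutationOp > Opt_t;
//   typedef SocialNetworkGraph< NoCommTopology > SolPropagationGraph_t;
//   typedef CommSplitter< Topology > Comm_t;
//
//   typedef Pilot< Opt_t, Sim_t, SolPropagationGraph_t, Comm_t > pilot_t;
//   boost::scoped_ptr<pilot_t> pi(new pilot_t(args, comm, funcs,
//                                             dvars, objectives, constraints));
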
/**
 *  \brief The Optimization Pilot (Master): coordinates requests by the
 *         optimizer to workers and reports results back to the optimizer.
 */
template <
      class Opt_t
    , class Sim_t
    , class SolPropagationGraph_t
    , class Comm_t
>
class Pilot : protected Poller {

public:

    // constructor only used by Pilot classes derived from this class;
    // those provide their own setup function
    Pilot(CmdArguments_t args, boost::shared_ptr<Comm_t> comm,
          const DVarContainer_t &dvar)
        : Poller(comm->mpiComm())
        , comm_(comm)
        , cmd_args_(args)
        , dvars_(dvar)
    {
        // do nothing
    }

    Pilot(CmdArguments_t args, boost::shared_ptr<Comm_t> comm,
          functionDictionary_t known_expr_funcs)
        : Poller(comm->mpiComm())
        , comm_(comm)
        , cmd_args_(args)
    {
        setup(known_expr_funcs);
    }

    Pilot(CmdArguments_t args, boost::shared_ptr<Comm_t> comm,
          functionDictionary_t known_expr_funcs,
          const DVarContainer_t &dvar,
          const Expressions::Named_t &obj,
          const Expressions::Named_t &cons,
          std::vector<double> hypervolRef = {},
          bool isOptimizerRun = true)
        : Poller(comm->mpiComm())
        , comm_(comm)
        , cmd_args_(args)
        , objectives_(obj)
        , constraints_(cons)
        , dvars_(dvar)
        , hypervolRef_(hypervolRef)
    {
        if (isOptimizerRun)
            setup(known_expr_funcs);
    }

    virtual ~Pilot()
    {
        for (auto itr = objectives_.begin(); itr != objectives_.end(); ++itr)
            delete itr->second;

        for (auto itr = constraints_.begin(); itr != constraints_.end(); ++itr)
            delete itr->second;
    }
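
    // note: objectives_ and constraints_ map names to raw Expressions::Expr_t
    // pointers owned by this Pilot; they are freed exactly once, in the
    // destructor above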


protected:

    /// MPI communicator used for messages to/from worker
    MPI_Comm worker_comm_;
    /// MPI communicator used for messages to/from optimizer
    MPI_Comm opt_comm_;
    /// MPI communicator used for messages between all pilots
    MPI_Comm coworker_comm_;

    boost::shared_ptr<Comm_t> comm_;
    CmdArguments_t cmd_args_;

    int global_rank_;
    int my_rank_in_worker_comm_;
    int my_rank_in_opt_comm_;

    int num_coworkers_;

    typedef MasterNode< typename Opt_t::SolutionState_t,
                        SolPropagationGraph_t > MasterNode_t;
    boost::scoped_ptr< MasterNode_t > master_node_;

    /// input file for simulation with embedded optimization problem
    std::string input_file_;

    int total_available_workers_;
    bool has_opt_converged_;
    bool continue_polling_;

    /// objectives
    Expressions::Named_t objectives_;
    /// constraints
    Expressions::Named_t constraints_;
    /// design variables
    DVarContainer_t dvars_;

    /// hypervolume reference point
    std::vector<double> hypervolRef_;


    // keep track of the state of all workers
    std::vector<bool> is_worker_idle_;
    //boost::dynamic_bitset<> is_worker_idle_;

    /// keep track of requests and running jobs
    typedef std::map<size_t, std::pair<Param_t, reqVarContainer_t> > Jobs_t;
    typedef Jobs_t::iterator JobIter_t;
    Jobs_t running_job_list_;
    Jobs_t request_queue_;

    //DEBUG
    boost::scoped_ptr<Trace> job_trace_;

private:
    void setup(functionDictionary_t known_expr_funcs) {
        global_rank_ = comm_->globalRank();

        if (global_rank_ == 0) {
            std::cout << "\033[01;35m";
            std::cout << "             _                _ _       _   " << std::endl;
            std::cout << "            | |              (_) |     | |  " << std::endl;
            std::cout << "  ___  _ __ | |_ ______ _ __  _| | ___ | |_ " << std::endl;
            std::cout << " / _ \\| '_ \\| __|______| '_ \\| | |/ _ \\| __|" << std::endl;
            std::cout << "| (_) | |_) | |_       | |_) | | | (_) | |_ " << std::endl;
            std::cout << " \\___/| .__/ \\__|      | .__/|_|_|\\___/ \\__|" << std::endl;
            std::cout << "      | |              | |                  " << std::endl;
            std::cout << "      |_|              |_|                  " << std::endl;
            // ADA std::cout << "☷ Version: \t" << PACKAGE_VERSION << std::endl;
            //std::cout << "☷ Git: \t\t" << GIT_VERSION << std::endl;
            //std::cout << "☷ Build Date: \t" << BUILD_DATE << std::endl;
            std::cout << "\033[0m";
            std::cout << std::endl;
        }

        MPI_Barrier(MPI_COMM_WORLD);
        parseInputFile(known_expr_funcs, true);

        // here the control flow starts to diverge
        if      ( comm_->isOptimizer() ) { startOptimizer(); }
        else if ( comm_->isWorker()    ) { startWorker();    }
        else if ( comm_->isPilot()     ) { startPilot();     }
    }
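
    // The control-flow split above follows the communicator topology that
    // CommSplitter establishes (sketch; see Comm/CommSplitter.h for the
    // authoritative role assignment):
    //
    //   isOptimizer() ranks --> startOptimizer(): runs Opt_t on opt_comm_
    //   isWorker()    ranks --> startWorker():    runs Sim_t jobs on worker_comm_
    //   isPilot()     rank  --> startPilot():     brokers jobs/results between
    //                           the two and links pilots via coworker_comm_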

protected:

    void parseInputFile(functionDictionary_t known_expr_funcs, bool isOptimizationRun) {

        try {
            input_file_ = cmd_args_->getArg<std::string>("inputfile", true);
        } catch (OptPilotException &e) {
            std::cout << "Could not find 'inputfile' in arguments. Aborting."
                      << std::endl;
            MPI_Abort(comm_m, -101);
        }

        if ((isOptimizationRun && objectives_.size() == 0) || dvars_.size() == 0) {
            throw OptPilotException("Pilot::Pilot()",
                                    "No objectives or dvars specified");
        }

        if (global_rank_ == 0) {
            std::ostringstream os;
            os << "\033[01;35m";
            os << " ✔ " << objectives_.size()
               << " objectives" << std::endl;
            if (isOptimizationRun) {
                os << " ✔ " << constraints_.size()
                   << " constraints" << std::endl;
            }
            os << " ✔ " << dvars_.size()
               << " dvars" << std::endl;
            os << "\033[0m";
            os << std::endl;
            std::cout << os.str() << std::flush;
        }

        MPI_Barrier(MPI_COMM_WORLD);
    }

    virtual
    void startOptimizer() {

        std::ostringstream os;
        os << "\033[01;35m" << " " << global_rank_ << " (PID: " << getpid() << ") ▶ Opt"
           << "\033[0m" << std::endl;
        std::cout << os.str() << std::flush;

        boost::scoped_ptr<Opt_t> opt(
            new Opt_t(objectives_, constraints_, dvars_, objectives_.size(),
                      comm_->getBundle(), cmd_args_, hypervolRef_, comm_->getNrWorkerGroups()));
        opt->initialize();

        std::cout << "Stop Opt.." << std::endl;
    }

    virtual
    void startWorker() {

        std::ostringstream os;
        os << "\033[01;35m" << " " << global_rank_ << " (PID: " << getpid() << ") ▶ Worker"
           << "\033[0m" << std::endl;
        std::cout << os.str() << std::flush;

        size_t pos = input_file_.find_last_of("/");
        std::string tmplfile = input_file_;
        if (pos != std::string::npos)
            tmplfile = input_file_.substr(pos + 1);
        pos = tmplfile.find(".");
        std::string simName = tmplfile.substr(0, pos);
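        // e.g. an (illustrative) input_file_ == "/path/to/opt-run.tmpl"
        // yields tmplfile == "opt-run.tmpl" and simName == "opt-run"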

        boost::scoped_ptr< Worker<Sim_t> > w(
            new Worker<Sim_t>(objectives_, constraints_, simName,
                              comm_->getBundle(), cmd_args_));

        std::cout << "Stop Worker.." << std::endl;
    }

    virtual
    void startPilot() {

        std::ostringstream os;
        os << "\033[01;35m" << " " << global_rank_ << " (PID: " << getpid() << ") ▶ Pilot"
           << "\033[0m" << std::endl;
        std::cout << os.str() << std::flush;

        // Traces
        std::ostringstream trace_filename;
        trace_filename << "pilot.trace." << comm_->getBundle().island_id;
        job_trace_.reset(new Trace("Optimizer Job Trace"));
        job_trace_->registerComponent( "sink",
                boost::shared_ptr<TraceComponent>(new FileSink(trace_filename.str())));

        worker_comm_   = comm_->getBundle().worker;
        opt_comm_      = comm_->getBundle().opt;
        coworker_comm_ = comm_->getBundle().world;

        my_rank_in_worker_comm_ = 0;
        MPI_Comm_rank(worker_comm_, &my_rank_in_worker_comm_);
        my_rank_in_opt_comm_ = 0;
        MPI_Comm_rank(opt_comm_, &my_rank_in_opt_comm_);

        total_available_workers_ = 0;
        MPI_Comm_size(worker_comm_, &total_available_workers_);
        is_worker_idle_.resize(total_available_workers_, true);

        // setup master network
        num_coworkers_ = 0;
        MPI_Comm_size(coworker_comm_, &num_coworkers_);
        if (num_coworkers_ > 1) {
            //FIXME: proper upper bound for window size
            int alpha = cmd_args_->getArg<int>("initialPopulation", false);
            int opt_size = objectives_.size() + constraints_.size();
            int overhead = 10;
            size_t upperbound_buffer_size =
                sizeof(double) * alpha * (1 + opt_size) * 1000
                + overhead;
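            // worked example (assuming sizeof(double) == 8): with an initial
            // population of alpha = 100 and opt_size = 3 objectives +
            // 2 constraints = 5, the bound is 8 * 100 * (1 + 5) * 1000 + 10
            // = 4'800'010 bytes, i.e. roughly 4.8 MB for the master node's
            // solution-state window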
            master_node_.reset(
                new MasterNode< typename Opt_t::SolutionState_t,
                                SolPropagationGraph_t >(
                    coworker_comm_, upperbound_buffer_size, objectives_.size(),
                    comm_->getBundle().island_id));
        }

        has_opt_converged_ = false;
        continue_polling_  = true;
        run();

        std::cout << "Stop Pilot.." << std::endl;
    }

    /// executed before starting polling loop
    virtual
    void setupPoll()
    {}

    /// executed before checking for new request
    virtual
    void prePoll()
    {}

    /// enable implementation to react to STOP tag
    virtual
    void onStop()
    {}
    /// executed after handling (if any) new request
    virtual
    void postPoll() {
        // terminating all workers is tricky since we do not know their state.
        // All workers are notified (to terminate) when the opt has converged
        // and all workers are idle.
        bool all_worker_idle = true;

        // handle the case where new requests became available after a worker
        // delivered its last results (and switched to the idle state).
        for (int i = 0; i < total_available_workers_; i++) {

            if (i == my_rank_in_worker_comm_) continue;

            all_worker_idle = all_worker_idle && is_worker_idle_[i];

            if (is_worker_idle_[i] && request_queue_.size() > 0)
                sendNewJobToWorker(i);
        }

        // when all workers have been notified we can stop polling
        if (all_worker_idle && has_opt_converged_) {
            continue_polling_ = false;
            int dummy = 0;
            for (int worker = 0; worker < total_available_workers_; worker++) {
                MPI_Request req;
                MPI_Isend(&dummy, 1, MPI_INT, worker,
                          MPI_STOP_TAG, worker_comm_, &req);
            }
        }
    }
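
    // Termination handshake implemented by stop() and postPoll() (summary):
    //
    //   optimizer -- OPT_CONVERGED_TAG --> pilot       (onMessage() calls stop())
    //   pilot     -- MPI_STOP_TAG ------> optimizer    (stop())
    //   pilot     -- MPI_STOP_TAG ------> all workers  (postPoll(), once every
    //                                                   worker is idle)
    //
    // with "one-pilot-converge", stop() additionally forwards
    // OPT_CONVERGED_TAG to the other pilots over coworker_comm_.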


    virtual
    void sendNewJobToWorker(int worker) {

        // no new jobs once our opt has converged
        if (has_opt_converged_) return;

        JobIter_t job = request_queue_.begin();
        size_t jid = job->first;

        Param_t job_params = job->second.first;
        MPI_Send(&jid, 1, MPI_UNSIGNED_LONG, worker, MPI_WORK_JOBID_TAG, worker_comm_);
        MPI_Send_params(job_params, worker, worker_comm_);

        //reqVarContainer_t job_reqvars = job->second.second;
        //MPI_Send_reqvars(job_reqvars, worker, worker_comm_);

        running_job_list_.insert(std::pair<size_t,
                std::pair<Param_t, reqVarContainer_t> >(job->first, job->second));
        request_queue_.erase(jid);
        is_worker_idle_[worker] = false;

        std::ostringstream dump;
        dump << "sent job with ID " << jid << " to worker " << worker
             << std::endl;
        job_trace_->log(dump);

    }


    virtual
    bool onMessage(MPI_Status status, size_t recv_value) {

        MPITag_t tag = MPITag_t(status.MPI_TAG);
        switch (tag) {

            case WORKER_FINISHED_TAG: {

                size_t job_id = recv_value;

                size_t dummy = 1;
                MPI_Send(&dummy, 1, MPI_UNSIGNED_LONG, status.MPI_SOURCE,
                         MPI_WORKER_FINISHED_ACK_TAG, worker_comm_);

                reqVarContainer_t res;
                MPI_Recv_reqvars(res, status.MPI_SOURCE, worker_comm_);

                running_job_list_.erase(job_id);
                is_worker_idle_[status.MPI_SOURCE] = true;

                std::ostringstream dump;
                dump << "worker finished job with ID " << job_id << std::endl;
                job_trace_->log(dump);


                // optimizer already terminated, cannot accept new messages
                if (has_opt_converged_) return true;

                int opt_master_rank = comm_->getLeader();
                MPI_Send(&job_id, 1, MPI_UNSIGNED_LONG, opt_master_rank,
                         MPI_OPT_JOB_FINISHED_TAG, opt_comm_);

                MPI_Send_reqvars(res, opt_master_rank, opt_comm_);

                // we keep the worker busy _after_ results have been sent to the optimizer
                if (request_queue_.size() > 0)
                    sendNewJobToWorker(status.MPI_SOURCE);

                return true;
            }

            case OPT_NEW_JOB_TAG: {

                size_t job_id = recv_value;
                int opt_master_rank = comm_->getLeader();

                Param_t job_params;
                MPI_Recv_params(job_params, (size_t)opt_master_rank, opt_comm_);

                reqVarContainer_t reqVars;
                //MPI_Recv_reqvars(reqVars, (size_t)opt_master_rank, job_size, opt_comm_);

                std::pair<Param_t, reqVarContainer_t> job =
                    std::pair<Param_t, reqVarContainer_t>(job_params, reqVars);
                request_queue_.insert(
                    std::pair<size_t, std::pair<Param_t, reqVarContainer_t> >(
                        job_id, job));

                std::ostringstream dump;
                dump << "new opt job with ID " << job_id << std::endl;
                job_trace_->log(dump);

                return true;
            }

            case EXCHANGE_SOL_STATE_TAG: {

                if (num_coworkers_ <= 1) return true;

                std::ostringstream dump;
                dump << "starting solution exchange.. " << status.MPI_SOURCE << std::endl;
                job_trace_->log(dump);

                // we start by storing our local solution state
                size_t buffer_size = recv_value;
                int opt_master_rank = status.MPI_SOURCE; //comm_->getLeader();

                char *buffer = new char[buffer_size];
                MPI_Recv(buffer, buffer_size, MPI_CHAR, opt_master_rank,
                         MPI_EXCHANGE_SOL_STATE_DATA_TAG, opt_comm_, &status);
                master_node_->store(buffer, buffer_size);
                delete[] buffer;

                dump.clear();
                dump.str(std::string());
                dump << "getting " << buffer_size << " bytes from OPT "
                     << opt_master_rank << std::endl;
                job_trace_->log(dump);

                // and then continue collecting all other solution states
                std::ostringstream states;
                master_node_->collect(states);
                buffer_size = states.str().length();

                dump.clear();
                dump.str(std::string());
                dump << "collected solution states of other PILOTS: "
                     << buffer_size << " bytes" << std::endl;
                job_trace_->log(dump);

                // send collected solution states to the optimizer
                MPI_Send(&buffer_size, 1, MPI_UNSIGNED_LONG, opt_master_rank,
                         MPI_EXCHANGE_SOL_STATE_RES_SIZE_TAG, opt_comm_);

                buffer = new char[buffer_size];
                memcpy(buffer, states.str().c_str(), buffer_size);
                MPI_Send(buffer, buffer_size, MPI_CHAR, opt_master_rank,
                         MPI_EXCHANGE_SOL_STATE_RES_TAG, opt_comm_);

                dump.clear();
                dump.str(std::string());
                dump << "sent set of new solutions to OPT" << std::endl;
                job_trace_->log(dump);

                delete[] buffer;

                return true;
            }

            case OPT_CONVERGED_TAG: {
                return stop();
            }

            case WORKER_STATUSUPDATE_TAG: {
                is_worker_idle_[status.MPI_SOURCE] = true;
                return true;
            }

            default: {
                std::string msg = "(Pilot) Error: unexpected MPI_TAG: ";
                msg += std::to_string(status.MPI_TAG);
                throw OptPilotException("Pilot::onMessage", msg);
            }
        }
    }

    bool stop(bool isOpt = true) {

        if (has_opt_converged_) return true;

        has_opt_converged_ = true;
        request_queue_.clear();
        size_t dummy = 0;
        MPI_Request req;
        MPI_Isend(&dummy, 1, MPI_UNSIGNED_LONG, comm_->getLeader(), MPI_STOP_TAG, opt_comm_, &req);

        if (!isOpt) return true;
        if (num_coworkers_ <= 1) return true;

        if (!cmd_args_->getArg<bool>("one-pilot-converge", false, false))
            return true;

        // propagate the converged message to the other pilots
        // FIXME: what happens if two islands converge at the same time?
        int my_rank = 0;
        MPI_Comm_rank(coworker_comm_, &my_rank);
        for (int i = 0; i < num_coworkers_; i++) {
            if (i == my_rank) continue;
            MPI_Request req;
            MPI_Isend(&dummy, 1, MPI_UNSIGNED_LONG, i, OPT_CONVERGED_TAG, coworker_comm_, &req);
        }

        return true;
    }


    // we override run() here to handle polling on two different communicators
    //XXX: would be nice to give the poller interface an array of comms and
    //     listeners to be called..
    void run() {

        MPI_Request opt_request;
        MPI_Request worker_request;
        MPI_Status status;
        int flag = 0;
        size_t recv_value_worker = 0;
        size_t recv_value_opt = 0;

        setupPoll();

        MPI_Irecv(&recv_value_opt, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                  MPI_ANY_TAG, opt_comm_, &opt_request);
        MPI_Irecv(&recv_value_worker, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                  MPI_ANY_TAG, worker_comm_, &worker_request);

        bool pending_opt_request    = true;
        bool pending_worker_request = true;
        bool pending_pilot_request  = false;

        MPI_Request pilot_request;
        size_t recv_value_pilot = 0;
        if (cmd_args_->getArg<bool>("one-pilot-converge", false, false)) {
            MPI_Irecv(&recv_value_pilot, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                      MPI_ANY_TAG, coworker_comm_, &pilot_request);
            pending_pilot_request = true;
        }

        while (continue_polling_) {

            prePoll();

            if (opt_request != MPI_REQUEST_NULL) {
                MPI_Test(&opt_request, &flag, &status);
                if (flag) {
                    pending_opt_request = false;
                    if (status.MPI_TAG == MPI_STOP_TAG) {
                        return;
                    } else {
                        if (onMessage(status, recv_value_opt)) {
                            MPI_Irecv(&recv_value_opt, 1, MPI_UNSIGNED_LONG,
                                      MPI_ANY_SOURCE, MPI_ANY_TAG, opt_comm_,
                                      &opt_request);
                            pending_opt_request = true;
                        } else
                            return;
                    }
                }
            }

            if (worker_request != MPI_REQUEST_NULL) {
                MPI_Test(&worker_request, &flag, &status);
                if (flag) {
                    pending_worker_request = false;
                    if (status.MPI_TAG == MPI_STOP_TAG) {
                        return;
                    } else {
                        if (onMessage(status, recv_value_worker)) {
                            MPI_Irecv(&recv_value_worker, 1,
                                      MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, MPI_ANY_TAG,
                                      worker_comm_, &worker_request);
                            pending_worker_request = true;
                        } else
                            return;
                    }
                }
            }

            if (cmd_args_->getArg<bool>("one-pilot-converge", false, false)) {
                if (pilot_request != MPI_REQUEST_NULL) {
                    MPI_Test(&pilot_request, &flag, &status);
                    if (flag) {
                        pending_pilot_request = false;
                        if (status.MPI_TAG == OPT_CONVERGED_TAG) {
                            stop(false);
                        } else {
                            MPI_Irecv(&recv_value_pilot, 1,
                                      MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, MPI_ANY_TAG,
                                      coworker_comm_, &pilot_request);
                            pending_pilot_request = true;
                        }
                    }
                }
            }

            postPoll();
        }

        if (pending_opt_request)    MPI_Cancel(&opt_request);
        if (pending_worker_request) MPI_Cancel(&worker_request);
        if (pending_pilot_request)  MPI_Cancel(&pilot_request);
    }

};

#endif
Definition: Pilot.h:181