template <
      class Opt_t
    , class Sim_t
    , class SolPropagationGraph_t
>
class Pilot : protected Poller {

public:

    // ...

    Pilot(CmdArguments_t args, std::shared_ptr<Comm_t> comm,
          functionDictionary_t known_expr_funcs)
        /* ... */
    {
        setup(known_expr_funcs, {});
    }
    Pilot(CmdArguments_t args, std::shared_ptr<Comm_t> comm,
          functionDictionary_t known_expr_funcs,
          const DVarContainer_t &dvar,
          const Expressions::Named_t &obj,
          const Expressions::Named_t &cons,
          std::vector<double> hypervolRef = {},
          bool isOptimizerRun = true,
          const std::map<std::string, std::string> &userVariables = {})
        /* ... */
    {
        // ...
        setup(known_expr_funcs, userVariables);
    }
    typedef MasterNode<typename Opt_t::SolutionState_t,
                       SolPropagationGraph_t> MasterNode_t;

    // ...

    /// keep track of requests and running jobs
    typedef std::map<size_t, std::pair<Param_t, reqVarContainer_t> > Jobs_t;
    typedef Jobs_t::iterator JobIter_t;
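    // ------------------------------------------------------------------
    // Aside (not part of this header): a minimal, self-contained sketch of
    // the Jobs_t bookkeeping. New requests enter a queue keyed by job id,
    // dispatching moves them into a running-jobs map, and completion erases
    // them. The map names and the simplified Param_t / reqVarContainer_t
    // stand-ins are assumptions for illustration only.

    #include <cstddef>
    #include <map>
    #include <string>
    #include <utility>

    typedef std::map<std::string, double> Param_t;           // stand-in
    typedef std::map<std::string, double> reqVarContainer_t; // stand-in
    typedef std::map<size_t, std::pair<Param_t, reqVarContainer_t> > Jobs_t;

    int main() {
        Jobs_t request_queue, running_jobs;

        // a new optimizer request enters the queue under its job id
        size_t job_id = 42;
        request_queue[job_id] =
            std::make_pair(Param_t{{"x", 1.0}}, reqVarContainer_t{});

        // dispatching a job moves the entry from the queue to the running set
        Jobs_t::iterator job = request_queue.begin();
        running_jobs.insert(*job);
        request_queue.erase(job);

        // once a worker reports completion, the entry is dropped again
        running_jobs.erase(job_id);
        return 0;
    }
    // ------------------------------------------------------------------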
    void setup(functionDictionary_t known_expr_funcs,
               const std::map<std::string, std::string> &userVariables) {
        // ...
        std::cout << "\033[01;35m";
        // ...
        std::cout << " | | (_) | | | " << std::endl;
        std::cout << " ___ _ __ | |_ ______ _ __ _| | ___ | |_ " << std::endl;
        std::cout << " / _ \\| '_ \\| __|______| '_ \\| | |/ _ \\| __|" << std::endl;
        std::cout << "| (_) | |_) | |_ | |_) | | | (_) | |_ " << std::endl;
        std::cout << " \\___/| .__/ \\__| | .__/|_|_|\\___/ \\__|" << std::endl;
        // ...
        std::cout << "\033[0m";
        // ...
        MPI_Barrier(MPI_COMM_WORLD);
        // ...
    }
    void parseInputFile(functionDictionary_t known_expr_funcs,
                        bool isOptimizationRun) {
        // ...
        try {
            input_file_ = cmd_args_->getArg<std::string>("inputfile", true);
        } catch (OptPilotException &e) {
            std::cout << "Could not find 'inputfile' in arguments.. Aborting."
                      << std::endl;
            // ...
        }
        // ...
        if (objectives_.size() == 0 || dvars_.size() == 0)
            throw OptPilotException(/* ... */,
                                    "No objectives or dvars specified");
        // ...
        std::ostringstream os;
        // ...
        if (isOptimizationRun) {
            // ...
            os << " ✔ " << dvars_.size()
               /* ... */;
            // ...
        }
        std::cout << os.str() << std::flush;
        // ...
        MPI_Barrier(MPI_COMM_WORLD);
    }
    virtual void startOptimizer() {
        std::ostringstream os;
        os << "\033[01;35m" << "  " << global_rank_
           << " (PID: " << getpid() << ") ▶ Opt"
           << "\033[0m" << std::endl;
        std::cout << os.str() << std::flush;
        // ...
        const std::unique_ptr<Opt_t> opt(
            new Opt_t(/* ... */));
        // ...
    }
    virtual void startWorker(const std::map<std::string, std::string> &userVariables) {
        std::ostringstream os;
        os << "\033[01;35m" << "  " << global_rank_
           << " (PID: " << getpid() << ") ▶ Worker"
           << "\033[0m" << std::endl;
        std::cout << os.str() << std::flush;
        // ...
        std::string tmplfile = /* ... */;
        size_t pos = tmplfile.find_last_of("/");
        if (pos != std::string::npos)
            tmplfile = tmplfile.substr(pos + 1);   // strip any directory prefix
        pos = tmplfile.find(".");
        std::string simName = tmplfile.substr(0, pos);
        // ...
        const std::unique_ptr< Worker<Sim_t> > w(
            new Worker<Sim_t>(/* ... */));
        // ...
        std::cout << "Stop Worker.." << std::endl;
    }
    virtual void startPilot() {
        std::ostringstream os;
        os << "\033[01;35m" << "  " << global_rank_
           << " (PID: " << getpid() << ") ▶ Pilot"
           << "\033[0m" << std::endl;
        std::cout << os.str() << std::flush;
        // ...
        std::ostringstream trace_filename;
        trace_filename << "pilot.trace." << comm_->getBundle().island_id;
        job_trace_->registerComponent("sink",
            std::shared_ptr<TraceComponent>(
                new FileSink(trace_filename.str())));
        // ...
        size_t upperbound_buffer_size =
            sizeof(double) * alpha * (1 + opt_size) * 1000
            /* ... */;
        // ...
        master_node_.reset(
            new MasterNode<typename Opt_t::SolutionState_t,
                           SolPropagationGraph_t>(
                /* ... */, comm_->getBundle().island_id));
        // ...
        std::cout << "Stop Pilot.." << std::endl;
    }
        // ...
        bool all_worker_idle = true;
        // ...
        // workers leave their poll loop when they receive MPI_STOP_TAG:
        MPI_Request req;
        MPI_Isend(&dummy, 1, MPI_INT, worker, MPI_STOP_TAG, worker_comm_, &req);
        // ...
    virtual void sendNewJobToWorker(int worker) {
        // ...
        size_t jid = job->first;
        // ...
        Param_t job_params = job->second.first;
        MPI_Send(&jid, 1, MPI_UNSIGNED_LONG, worker, MPI_WORK_JOBID_TAG,
                 worker_comm_);
        MPI_Send_params(job_params, worker, worker_comm_);
        // ...
        running_job_list_.insert(std::pair<size_t,
            std::pair<Param_t, reqVarContainer_t> >(job->first, job->second));
        // ...
        std::ostringstream dump;
        dump << "sent job with ID " << jid << " to worker " << worker
             << std::endl;
        job_trace_->log(dump);
    }
        // a worker reports that it finished a job:
        size_t job_id = recv_value;
        // ...
        // acknowledge, so the worker can hand over the results
        MPI_Send(&dummy, 1, MPI_UNSIGNED_LONG, status.MPI_SOURCE,
                 MPI_WORKER_FINISHED_ACK_TAG, worker_comm_);
        // ...
        std::ostringstream dump;
        dump << "worker finished job with ID " << job_id << std::endl;
        job_trace_->log(dump);
        // ...
        // tell the optimizer master that results are ready to collect
        int opt_master_rank = comm_->getLeader();
        MPI_Send(&job_id, 1, MPI_UNSIGNED_LONG, opt_master_rank,
                 MPI_OPT_JOB_FINISHED_TAG, opt_comm_);
        // a new job arrives from the optimizer:
        size_t job_id = recv_value;
        int opt_master_rank = comm_->getLeader();
        // ...
        Param_t job_params;
        MPI_Recv_params(job_params, (size_t)opt_master_rank, opt_comm_);
        // ...
        reqVarContainer_t reqVars;
        // ...
        std::pair<Param_t, reqVarContainer_t> job =
            std::pair<Param_t, reqVarContainer_t>(job_params, reqVars);
        request_queue_.insert(std::pair<
            size_t, std::pair<Param_t, reqVarContainer_t> >(job_id, job));
        // ...
        std::ostringstream dump;
        dump << "new opt job with ID " << job_id << std::endl;
        job_trace_->log(dump);
        // exchange of solution states between the pilots:
        std::ostringstream dump;
        dump << "starting solution exchange.. " << status.MPI_SOURCE
             << std::endl;
        job_trace_->log(dump);
        // ...
        size_t buffer_size = recv_value;
        int opt_master_rank = status.MPI_SOURCE;
        // ...
        char *buffer = new char[buffer_size];
        MPI_Recv(buffer, buffer_size, MPI_CHAR, opt_master_rank,
                 MPI_EXCHANGE_SOL_STATE_DATA_TAG, opt_comm_, &status);
        // ...
        dump.str(std::string());
        dump << "getting " << buffer_size << " bytes from OPT "
             << opt_master_rank << std::endl;
        job_trace_->log(dump);
        // ...
        std::ostringstream states;
        // ... (solution states gathered from the other pilots are
        //      serialized into 'states')
        buffer_size = states.str().length();
        delete[] buffer;   // free the receive buffer before reusing the pointer
        // ...
        dump.str(std::string());
        dump << "collected solution states of other PILOTS: "
             /* ... */;
        // ...
        MPI_Send(&buffer_size, 1, MPI_UNSIGNED_LONG, opt_master_rank,
                 MPI_EXCHANGE_SOL_STATE_RES_SIZE_TAG, opt_comm_);
        // ...
        buffer = new char[buffer_size];
        memcpy(buffer, states.str().c_str(), buffer_size);
        MPI_Send(buffer, buffer_size, MPI_CHAR, opt_master_rank,
                 MPI_EXCHANGE_SOL_STATE_RES_TAG, opt_comm_);
        // ...
        dump.str(std::string());
        dump << "sent set of new solutions to OPT" << std::endl;
        job_trace_->log(dump);
        delete[] buffer;
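        // --------------------------------------------------------------
        // Aside (not part of this header): the exchange above follows a
        // size-then-payload pattern. The byte count travels under one tag
        // (MPI_EXCHANGE_SOL_STATE_RES_SIZE_TAG) so the peer can allocate,
        // then the raw bytes follow under a second tag
        // (MPI_EXCHANGE_SOL_STATE_RES_TAG). A stand-alone sketch with
        // assumed tag values:

        #include <mpi.h>
        #include <cstddef>
        #include <string>

        static const int SIZE_TAG = 1;   // assumed stand-ins for the tags above
        static const int DATA_TAG = 2;

        void sendBlob(const std::string &payload, int dest, MPI_Comm comm) {
            size_t n = payload.size();
            MPI_Send(&n, 1, MPI_UNSIGNED_LONG, dest, SIZE_TAG, comm);
            MPI_Send(const_cast<char*>(payload.data()),
                     static_cast<int>(n), MPI_CHAR, dest, DATA_TAG, comm);
        }

        std::string recvBlob(int src, MPI_Comm comm) {
            size_t n = 0;
            MPI_Status status;
            MPI_Recv(&n, 1, MPI_UNSIGNED_LONG, src, SIZE_TAG, comm, &status);
            std::string payload(n, '\0');
            MPI_Recv(&payload[0], static_cast<int>(n), MPI_CHAR, src,
                     DATA_TAG, comm, &status);
            return payload;
        }
        // --------------------------------------------------------------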
        std::string msg = "(Pilot) Error: unexpected MPI_TAG: ";
        msg += std::to_string(status.MPI_TAG);
    bool stop(bool isOpt = true) {
        // ...
        if (!isOpt) return true;
        // ...
        if (!cmd_args_->getArg<bool>("one-pilot-converge", false, false))
            /* ... */;
        // ...
        // notify every other pilot on coworker_comm_ (loop bounds elided):
        for (int i = 0; /* ... */; i++) {
            if (i == my_rank) continue;
            // ...
        }
        // ...
    }
    virtual void run() {

        MPI_Request opt_request;
        MPI_Request worker_request;
        MPI_Status status;
        int flag = 0;
        size_t recv_value_worker = 0;
        size_t recv_value_opt = 0;

        setupPoll();

        MPI_Irecv(&recv_value_opt, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                  MPI_ANY_TAG, opt_comm_, &opt_request);
        MPI_Irecv(&recv_value_worker, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                  MPI_ANY_TAG, worker_comm_, &worker_request);

        bool pending_opt_request    = true;
        bool pending_worker_request = true;
        bool pending_pilot_request  = false;

        MPI_Request pilot_request;
        size_t recv_value_pilot = 0;
        if (cmd_args_->getArg<bool>("one-pilot-converge", false, false)) {
            MPI_Irecv(&recv_value_pilot, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                      MPI_ANY_TAG, coworker_comm_, &pilot_request);
            pending_pilot_request = true;
        }

        while (/* polling continues */) {

            prePoll();

            // check for pending messages from the optimizer
            if (opt_request != MPI_REQUEST_NULL) {
                MPI_Test(&opt_request, &flag, &status);
                if (flag) {
                    pending_opt_request = false;
                    if (onMessage(status, recv_value_opt)) {
                        MPI_Irecv(&recv_value_opt, 1, MPI_UNSIGNED_LONG,
                                  MPI_ANY_SOURCE, MPI_ANY_TAG, opt_comm_,
                                  &opt_request);
                        pending_opt_request = true;
                    }
                }
            }

            // check for pending messages from the workers
            if (worker_request != MPI_REQUEST_NULL) {
                MPI_Test(&worker_request, &flag, &status);
                if (flag) {
                    pending_worker_request = false;
                    if (onMessage(status, recv_value_worker)) {
                        MPI_Irecv(&recv_value_worker, 1,
                                  MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, MPI_ANY_TAG,
                                  worker_comm_, &worker_request);
                        pending_worker_request = true;
                    }
                }
            }

            // check for pending messages from other pilots
            // (only in "one-pilot-converge" mode)
            if (cmd_args_->getArg<bool>("one-pilot-converge", false, false)) {
                if (pilot_request != MPI_REQUEST_NULL) {
                    MPI_Test(&pilot_request, &flag, &status);
                    if (flag) {
                        pending_pilot_request = false;
                        if (onMessage(status, recv_value_pilot)) {
                            MPI_Irecv(&recv_value_pilot, 1,
                                      MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                                      MPI_ANY_TAG, coworker_comm_,
                                      &pilot_request);
                            pending_pilot_request = true;
                        }
                    }
                }
            }

            postPoll();
        }

        // cancel any receive still outstanding when the poll loop exits
        if (pending_opt_request)    MPI_Cancel(&opt_request);
        if (pending_worker_request) MPI_Cancel(&worker_request);
        if (pending_pilot_request)  MPI_Cancel(&pilot_request);
    }
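    // ------------------------------------------------------------------
    // Aside (not part of this header): the loop above is the classic
    // non-blocking polling idiom, post an MPI_Irecv, MPI_Test it until it
    // completes, handle the message, and re-post the receive while the
    // handler asks to keep listening. A stand-alone sketch for a single
    // channel; handleMessage() and the termination flag are assumptions.

    #include <mpi.h>
    #include <cstddef>

    // assumed handler: returns true to keep listening on this channel
    static bool handleMessage(const MPI_Status &status, size_t value) {
        (void)status; (void)value;
        return true;
    }

    void pollChannel(MPI_Comm comm, volatile bool &keep_running) {
        MPI_Request request;
        MPI_Status status;
        size_t recv_value = 0;
        int flag = 0;

        MPI_Irecv(&recv_value, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE,
                  MPI_ANY_TAG, comm, &request);
        bool pending = true;

        while (keep_running) {
            if (request != MPI_REQUEST_NULL) {
                MPI_Test(&request, &flag, &status);
                if (flag) {
                    pending = false;
                    if (handleMessage(status, recv_value)) {
                        MPI_Irecv(&recv_value, 1, MPI_UNSIGNED_LONG,
                                  MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &request);
                        pending = true;
                    }
                }
            }
        }
        if (pending) MPI_Cancel(&request);   // don't leak the posted receive
    }
    // ------------------------------------------------------------------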
virtual void sendNewJobToWorker(int worker)
virtual void prePoll()
executed before checking for new requests
DVarContainer_t dvars_
design variables
int total_available_workers_
namedVariableCollection_t Param_t
virtual void postPoll()
executed after handling a new request (if any)
int my_rank_in_worker_comm_
std::string input_file_
input file for simulation with embedded optimization problem
std::vector< bool > is_worker_idle_
Expressions::Named_t objectives_
objectives
void parseInputFile(functionDictionary_t, bool isOptimizationRun)
MasterNode< typename Opt_t::SolutionState_t, SolPropagationGraph_t > MasterNode_t
std::unique_ptr< Trace > job_trace_
virtual void onStop()
enable implementation to react to STOP tag
virtual void startPilot()
Jobs_t::iterator JobIter_t
MPI_Comm comm_m
communicator on which the poller listens for requests
std::vector< double > hypervolRef_
hypervolume reference point
std::map< std::string, DVar_t > DVarContainer_t
#define MPI_EXCHANGE_SOL_STATE_DATA_TAG
Expressions::Named_t constraints_
constraints
#define MPI_EXCHANGE_SOL_STATE_RES_SIZE_TAG
Inform & endl(Inform &inf)
std::map< std::string, Expressions::Expr_t * > Named_t
type of an expression with a name
MPI_Comm worker_comm_
MPI communicator used for messages to/from workers.
Poller(MPI_Comm comm, double delay=0.1)
Pilot(CmdArguments_t args, std::shared_ptr< Comm_t > comm, functionDictionary_t known_expr_funcs, const DVarContainer_t &dvar, const Expressions::Named_t &obj, const Expressions::Named_t &cons, std::vector< double > hypervolRef={}, bool isOptimizerRun=true, const std::map< std::string, std::string > &userVariables={})
#define MPI_WORK_JOBID_TAG
unique id of the job
std::unique_ptr< MasterNode_t > master_node_
virtual void startWorker(const std::map< std::string, std::string > &userVariables)
#define MPI_OPT_JOB_FINISHED_TAG
pilot tells optimizer that results are ready to collect
void MPI_Send_reqvars(reqVarContainer_t reqvars, size_t pid, MPI_Comm comm)
Pilot(CmdArguments_t args, std::shared_ptr< Comm_t > comm, const DVarContainer_t &dvar)
std::shared_ptr< CmdArguments > CmdArguments_t
#define MPI_EXCHANGE_SOL_STATE_RES_TAG
std::map< std::string, client::function::type > functionDictionary_t
Pilot(CmdArguments_t args, std::shared_ptr< Comm_t > comm, functionDictionary_t known_expr_funcs)
bool stop(bool isOpt=true)
#define MPI_STOP_TAG
global stop tag to exit poll loop
#define MPI_WORKER_FINISHED_ACK_TAG
pilot notifies the worker that it is ready to collect the results
void MPI_Recv_reqvars(reqVarContainer_t &reqvars, size_t pid, MPI_Comm comm)
constexpr double alpha
The fine structure constant, no dimension.
void setup(functionDictionary_t known_expr_funcs, const std::map< std::string, std::string > &userVariables)
virtual void startOptimizer()
virtual void setupPoll()
executed before starting the polling loop
void MPI_Send_params(Param_t params, size_t pid, MPI_Comm comm)
std::shared_ptr< Comm_t > comm_
constexpr double e
The value of e.
MPI_Comm opt_comm_
MPI communicator used for messages to/from optimizer.
void MPI_Recv_params(Param_t &params, size_t pid, MPI_Comm comm)
virtual bool onMessage(MPI_Status status, size_t recv_value)
std::map< std::string, reqVarInfo_t > reqVarContainer_t
std::map< size_t, std::pair< Param_t, reqVarContainer_t > > Jobs_t
keep track of requests and running jobs
MPI_Comm coworker_comm_
MPI communicator used for messages between all pilots.
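The reference above pins down the contract between the three template parameters: Opt_t must expose a nested SolutionState_t (consumed by MasterNode_t), Sim_t is the simulation type run by the workers (Worker<Sim_t>), and SolPropagationGraph_t selects the topology along which solution states propagate. A minimal sketch with stub types, illustrating only this contract; the stub names are assumptions, not framework classes:

    #include <cstddef>

    struct StubOptimizer  { typedef int SolutionState_t; }; // must expose SolutionState_t
    struct StubSimulation {};
    struct StubGraph      {};

    template <class Opt_t, class Sim_t, class SolPropagationGraph_t>
    class PilotSketch {
        // mirrors the MasterNode_t typedef in the listing above
        typedef typename Opt_t::SolutionState_t state_t;
    };

    int main() {
        PilotSketch<StubOptimizer, StubSimulation, StubGraph> sketch;
        (void)sketch;
        return 0;
    }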