Geant4  v4-10.4-release
G4MPImanager.cc
//
// ********************************************************************
// * License and Disclaimer                                           *
// *                                                                  *
// * The  Geant4 software  is  copyright of the Copyright Holders  of *
// * the Geant4 Collaboration.  It is provided  under  the terms  and *
// * conditions of the Geant4 Software License,  included in the file *
// * LICENSE and available at  http://cern.ch/geant4/license .  These *
// * include a list of copyright holders.                             *
// *                                                                  *
// * Neither the authors of this software system, nor their employing *
// * institutes,nor the agencies providing financial support for this *
// * work  make  any representation or  warranty, express or implied, *
// * regarding  this  software system or assume any liability for its *
// * use.  Please see the license in the file  LICENSE  and URL above *
// * for the full disclaimer and the limitation of liability.         *
// *                                                                  *
// * This  code  implementation is the result of  the  scientific and *
// * technical work of the GEANT4 collaboration.                      *
// * By using,  copying,  modifying or  distributing the software (or *
// * any work based  on the software)  you  agree  to acknowledge its *
// * use  in  resulting  scientific  publications,  and indicate your *
// * acceptance of all terms of the Geant4 Software license.          *
// ********************************************************************

#include "mpi.h"
#include <getopt.h>
#include <stdio.h>
#include <time.h>
#include "G4Run.hh"
#include "G4RunManager.hh"
#include "G4StateManager.hh"
#include "G4UIcommand.hh"
#include "G4UImanager.hh"
#include "G4MPIbatch.hh"
#include "G4MPImanager.hh"
#include "G4MPImessenger.hh"
#include "G4MPIrandomSeedGenerator.hh"
#include "G4MPIsession.hh"
#include "G4MPIstatus.hh"

G4MPImanager* G4MPImanager::g4mpi_ = NULL;

// --------------------------------------------------------------------------
namespace {

// wrappers for thread functions
void thread_ExecuteThreadCommand(const G4String* command)
{
  G4MPImanager::GetManager()-> ExecuteThreadCommand(*command);
}

// --------------------------------------------------------------------------
void Wait(G4int ausec)
{
  struct timespec treq, trem;
  treq.tv_sec = 0;
  treq.tv_nsec = ausec*1000;

  nanosleep(&treq, &trem);
}

} // end of namespace

// --------------------------------------------------------------------------
G4MPImanager::G4MPImanager()
  : verbose_(0), qfcout_(false), qinitmacro_(false), qbatchmode_(false),
    thread_id_(0), master_weight_(1.)
{
  //MPI::Init();
  MPI::Init_thread(MPI::THREAD_SERIALIZED);
  Initialize();
}

// --------------------------------------------------------------------------
G4MPImanager::G4MPImanager(int argc, char** argv)
  : verbose_(0), qfcout_(false), qinitmacro_(false), qbatchmode_(false),
    thread_id_(0), master_weight_(1.)
{
  //MPI::Init(argc, argv);
  MPI::Init_thread(argc, argv, MPI::THREAD_SERIALIZED);
  Initialize();
  ParseArguments(argc, argv);
}

// --------------------------------------------------------------------------
G4MPImanager::~G4MPImanager()
{
  if( is_slave_ && qfcout_ ) fscout_.close();

  delete status_;
  delete messenger_;
  delete session_;

  COMM_G4COMMAND_.Free();

  MPI::Finalize();
}

// --------------------------------------------------------------------------
G4MPImanager* G4MPImanager::GetManager()
{
  if ( g4mpi_ == NULL ) {
    G4Exception("G4MPImanager::GetManager()", "MPI001",
                FatalException, "G4MPImanager is not instantiated.");
  }
  return g4mpi_;
}

// --------------------------------------------------------------------------
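// Set up per-rank state: register this object as the singleton, query the
// MPI rank and size, duplicate MPI_COMM_WORLD into a private communicator
// for G4 command traffic, create the messenger/session/status helpers and
// distribute the random-number seeds.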
void G4MPImanager::Initialize()
{
  if ( g4mpi_ != NULL ) {
    G4Exception("G4MPImanager::Initialize()", "MPI002",
                FatalException, "G4MPImanager is already instantiated.");
  }

  g4mpi_ = this;

  // get rank information
  size_ = MPI::COMM_WORLD.Get_size();
  rank_ = MPI::COMM_WORLD.Get_rank();
  is_master_ = (rank_ == kRANK_MASTER);
  is_slave_ = (rank_ != kRANK_MASTER);

  // initialize MPI communicator
  COMM_G4COMMAND_ = MPI::COMM_WORLD.Dup();

  // new G4MPI stuff
  messenger_ = new G4MPImessenger();
  messenger_-> SetTargetObject(this);
  session_ = new G4MPIsession;
  status_ = new G4MPIstatus;

  // default seed generator is random generator.
  seed_generator_ = new G4MPIrandomSeedGenerator;
  DistributeSeeds();
}

// --------------------------------------------------------------------------
void G4MPImanager::ParseArguments(int argc, char** argv)
{
  G4int qhelp = 0;
  G4String ofprefix = "mpi";

  G4int c;
  while ( 1 ) {
    G4int option_index = 0;
    static struct option long_options[] = {
      {"help", no_argument, NULL, 'h'},
      {"verbose", no_argument, NULL, 'v'},
      {"init", required_argument, NULL, 'i'},
      {"ofile", optional_argument, NULL, 'o'},
      {NULL, 0, NULL, 0}
    };

    opterr = 0; // suppress message
    c = getopt_long(argc, argv, "hvi:o", long_options, &option_index);
    opterr = 1;

    if( c == -1 ) break;

    switch (c) {
    case 'h' :
      qhelp = 1;
      break;
    case 'v' :
      verbose_ = 1;
      break;
    case 'i' :
      qinitmacro_ = true;
      init_file_name_ = optarg;
      break;
    case 'o' :
      qfcout_ = true;
      if ( optarg ) ofprefix = optarg;
      break;
    default:
      G4cerr << "*** invalid options specified." << G4endl;
      std::exit(EXIT_FAILURE);
      break;
    }
  }

  // show help
  if ( qhelp ) {
    if ( is_master_ ) ShowHelp();
    MPI::Finalize();
    std::exit(EXIT_SUCCESS);
  }

  // file output
  if( is_slave_ && qfcout_ ) {
    G4String prefix = ofprefix + ".%03d" + ".cout";
    char str[1024];
    sprintf(str, prefix.c_str(), rank_);
    G4String fname(str);
    fscout_.open(fname.c_str(), std::ios::out);
  }

  // non-option ARGV-elements ...
  if ( optind < argc ) {
    qbatchmode_ = true;
    macro_file_name_ = argv[optind];
  }
}

// ====================================================================
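// Snapshot the state of this rank (run ID, events processed / to be
// processed, current application state) into the G4MPIstatus object.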
void G4MPImanager::UpdateStatus()
{
  G4RunManager* runManager = G4RunManager::GetRunManager();
  const G4Run* run = runManager-> GetCurrentRun();

  G4int runid, eventid, neventTBP;

  G4StateManager* stateManager = G4StateManager::GetStateManager();
  G4ApplicationState g4state = stateManager-> GetCurrentState();

  if ( run ) {
    runid = run-> GetRunID();
    neventTBP = run-> GetNumberOfEventToBeProcessed();
    eventid = run-> GetNumberOfEvent();
    if( g4state == G4State_GeomClosed || g4state == G4State_EventProc ) {
      status_-> StopTimer();
    }
  } else {
    runid = 0;
    eventid = 0;
    neventTBP = 0;
  }

  status_-> SetStatus(rank_, runid, neventTBP, eventid, g4state);
}

// --------------------------------------------------------------------------
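// The master prints its own status, then receives and prints the packed
// status of every slave and reports the aggregated event count and CPU
// time; slaves simply pack their status and send it to the master.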
void G4MPImanager::ShowStatus()
{
  G4int buff[G4MPIstatus::kNSIZE];

  UpdateStatus();
  G4bool gstatus = CheckThreadStatus();

  if ( is_master_ ) {
    status_-> Print(); // for master itself

    G4int nev = status_-> GetEventID();
    G4int nevtp = status_-> GetNEventToBeProcessed();
    G4double cputime = status_-> GetCPUTime();

    // receive from each slave
    for ( G4int islave = 1; islave < size_; islave++ ) {
      COMM_G4COMMAND_.Recv(buff, G4MPIstatus::kNSIZE, MPI::INT,
                           islave, kTAG_G4STATUS);
      status_-> UnPack(buff);
      status_-> Print();

      // aggregation
      nev += status_-> GetEventID();
      nevtp += status_-> GetNEventToBeProcessed();
      cputime += status_-> GetCPUTime();
    }

    G4String strStatus;
    if ( gstatus ) {
      strStatus = "Run";
    } else {
      strStatus = "Idle";
    }

    G4cout << "-------------------------------------------------------"
           << G4endl
           << "* #ranks= " << size_
           << " event= " << nev << "/" << nevtp
           << " state= " << strStatus
           << " time= " << cputime << "s"
           << G4endl;
  } else {
    status_-> Pack(buff);
    COMM_G4COMMAND_.Send(buff, G4MPIstatus::kNSIZE, MPI::INT,
                         kRANK_MASTER, kTAG_G4STATUS);
  }
}

// ====================================================================
void G4MPImanager::DistributeSeeds()
{
  std::vector<G4long> seed_list = seed_generator_-> GetSeedList();
  G4Random::setTheSeed(seed_list[rank_]);
}

// --------------------------------------------------------------------------
void G4MPImanager::ShowSeeds()
{
  G4long buff;

  if ( is_master_ ) {
    // print master
    G4cout << "* rank= " << rank_
           << " seed= " << G4Random::getTheSeed()
           << G4endl;
    // receive from each slave
    for ( G4int islave = 1; islave < size_; islave++ ) {
      COMM_G4COMMAND_.Recv(&buff, 1, MPI::LONG, islave, kTAG_G4SEED);
      G4cout << "* rank= " << islave
             << " seed= " << buff
             << G4endl;
    }
  } else { // slaves
    buff = G4Random::getTheSeed();
    COMM_G4COMMAND_.Send(&buff, 1, MPI::LONG, kRANK_MASTER, kTAG_G4SEED);
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::SetSeed(G4int inode, G4long seed)
{
  if( rank_ == inode ) {
    G4Random::setTheSeed(seed);
  }
}

// ====================================================================
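// Return true if a beamOn worker thread is running on any rank: the master
// ORs its own flag with the flags received from every slave, then
// broadcasts the combined result so all ranks agree.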
G4bool G4MPImanager::CheckThreadStatus()
{
  unsigned buff;
  unsigned qstatus = 0;

  if( is_master_ ) {
    qstatus = (thread_id_ != 0);
    // get slave status
    for ( G4int islave = 1; islave < size_; islave++ ) {
      MPI::Request request = COMM_G4COMMAND_.Irecv(&buff, 1, MPI::UNSIGNED,
                                                   islave, kTAG_G4STATUS);
      while( ! request.Test() ) {
        ::Wait(1000);
      }
      qstatus |= buff;
    }
  } else {
    buff = (thread_id_ != 0);
    COMM_G4COMMAND_.Send(&buff, 1, MPI::UNSIGNED, kRANK_MASTER, kTAG_G4STATUS);
  }

  // broadcast
  buff = qstatus; // for master
  COMM_G4COMMAND_.Bcast(&buff, 1, MPI::UNSIGNED, kRANK_MASTER);
  qstatus = buff; // for slave

  if ( qstatus != 0 ) return true;
  else return false;
}

// --------------------------------------------------------------------------
void G4MPImanager::ExecuteThreadCommand(const G4String& command)
{
  // this method is a thread function.
  G4UImanager* UI = G4UImanager::GetUIpointer();
  G4int rc = UI-> ApplyCommand(command);

  G4int commandStatus = rc - (rc%100);

  switch( commandStatus ) {
  case fCommandSucceeded:
    break;
  case fIllegalApplicationState:
    G4cerr << "illegal application state -- command refused" << G4endl;
    break;
  default:
    G4cerr << "command refused (" << commandStatus << ")" << G4endl;
    break;
  }

  // thread is joined
  if ( thread_id_ ) {
    pthread_join(thread_id_, 0);
    thread_id_ = 0;
  }

  return;
}

// --------------------------------------------------------------------------
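// Run the given UI command (typically /run/beamOn) in a separate pthread
// via the thread_ExecuteThreadCommand wrapper, unless a previous beamOn
// thread is still running.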
void G4MPImanager::ExecuteBeamOnThread(const G4String& command)
{
  G4bool threadStatus = CheckThreadStatus();

  if (threadStatus) {
    if ( is_master_ ) {
      G4cout << "G4MPIsession:: beamOn is still running." << G4endl;
    }
  } else { // ok
    static G4String cmdstr;
    cmdstr = command;
    G4int rc = pthread_create(&thread_id_, 0,
                              (Func_t)thread_ExecuteThreadCommand,
                              (void*)&cmdstr);
    if (rc != 0)
      G4Exception("G4MPImanager::ExecuteBeamOnThread()",
                  "MPI003", FatalException,
                  "Failed to create a beamOn thread.");
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::JoinBeamOnThread()
{
  if ( thread_id_ ) {
    pthread_join(thread_id_, 0);
    thread_id_ = 0;
  }
}

// ====================================================================
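// Distribute a UI command string from the master to the slaves. Instead of
// a collective MPI broadcast, the master sends a fixed-size buffer to each
// slave while the slaves poll a non-blocking receive, sleeping between
// tests to avoid busy-waiting.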
G4String G4MPImanager::BcastCommand(const G4String& command)
{
  enum { kBUFF_SIZE = 512 };
  static char sbuff[kBUFF_SIZE];
  command.copy(sbuff, kBUFF_SIZE);
  G4int len = command.size();
  sbuff[len] = '\0'; // no boundary check

  // "command" is not yet fixed in slaves at this time.

  // waiting message exhausts CPU in LAM!
  //COMM_G4COMMAND_.Bcast(sbuff, ssize, MPI::CHAR, RANK_MASTER);

  // another implementation
  if( is_master_ ) {
    for ( G4int islave = 1; islave < size_; islave++ ) {
      COMM_G4COMMAND_.Send(sbuff, kBUFF_SIZE, MPI::CHAR,
                           islave, kTAG_G4COMMAND);
    }
  } else {
    // try non-blocking receive
    MPI::Request request= COMM_G4COMMAND_.Irecv(sbuff, kBUFF_SIZE, MPI::CHAR,
                                                kRANK_MASTER, kTAG_G4COMMAND);
    // polling...
    while(! request.Test()) {
      ::Wait(1000);
    }
  }

  return G4String(sbuff);
}

// ====================================================================
void G4MPImanager::ExecuteMacroFile(const G4String& fname, G4bool qbatch)
{
  G4bool currentmode = qbatchmode_;
  qbatchmode_ = true;
  G4MPIbatch* batchSession = new G4MPIbatch(fname, qbatch);
  batchSession-> SessionStart();
  delete batchSession;
  qbatchmode_ = currentmode;
}

// --------------------------------------------------------------------------
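// Start a run. With qdivide=true the nevent events are split according to
// master_weight_: each slave processes nproc = nevent/ntot events with
// ntot = master_weight_ + size_ - 1, and the master takes the remainder.
// For example, nevent=1000, size_=4 and master_weight_=1. gives 250 events
// on each slave and 250 on the master.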
void G4MPImanager::BeamOn(G4int nevent, G4bool qdivide)
{
#ifndef G4MULTITHREADED
  G4RunManager* runManager = G4RunManager::GetRunManager();
#endif

  if ( qdivide ) { // events are divided
    G4double ntot = master_weight_ + size_ - 1.;
    G4int nproc = G4int(nevent/ntot);
    G4int nproc0 = nevent - nproc*(size_-1);

    if ( verbose_ > 0 && is_master_ ) {
      G4cout << "#events in master=" << nproc0 << " / "
             << "#events in slave=" << nproc << G4endl;
    }

    status_-> StartTimer(); // start timer

#ifdef G4MULTITHREADED
    G4String str_nevt;
    if ( is_master_ ) str_nevt = G4UIcommand::ConvertToString(nproc0);
    else str_nevt = G4UIcommand::ConvertToString(nproc);
    G4UImanager* UI = G4UImanager::GetUIpointer();
    UI-> ApplyCommand("/run/beamOn " + str_nevt);
#else
    if ( is_master_ ) runManager-> BeamOn(nproc0);
    else runManager-> BeamOn(nproc);
#endif

    status_-> StopTimer(); // stop timer

  } else { // same events are generated in each node (for test use)
    if( verbose_ > 0 && is_master_ ) {
      G4cout << "#events in master=" << nevent << " / "
             << "#events in slave=" << nevent << G4endl;
    }
    status_-> StartTimer(); // start timer

#ifdef G4MULTITHREADED
    G4String str_nevt = G4UIcommand::ConvertToString(nevent);
    G4UImanager* UI = G4UImanager::GetUIpointer();
    UI-> ApplyCommand("/run/beamOn " + str_nevt);
#else
    runManager-> BeamOn(nevent);
#endif

    status_-> StopTimer(); // stop timer
  }
}

// --------------------------------------------------------------------------
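// Batch-mode synchronization: each slave sends a completion flag to the
// master, and the master polls non-blocking receives until every slave has
// checked in, so it returns only after all ranks have finished their run.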
void G4MPImanager::WaitBeamOn()
{
  G4int buff = 0;
  if ( qbatchmode_ ) { // valid only in batch mode
    if ( is_master_ ) {
      // receive from each slave
      for (G4int islave = 1; islave < size_; islave++) {
        MPI::Request request = COMM_G4COMMAND_.Irecv(&buff, 1, MPI::INT,
                                                     islave, kTAG_G4STATUS);
        while(! request.Test()) {
          ::Wait(1000);
        }
      }
    } else {
      buff = 1;
      COMM_G4COMMAND_.Send(&buff, 1, MPI::INT, kRANK_MASTER, kTAG_G4STATUS);
    }
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::Print(const G4String& message)
{
  if ( is_master_ ){
    std::cout << message << std::flush;
  } else {
    if ( qfcout_ ) { // output to a file
      fscout_ << message << std::flush;
    } else { // output to stdout
      std::cout << rank_ << ":" << message << std::flush;
    }
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::ShowHelp() const
{
  if (is_slave_ ) return;

  G4cout << "Geant4 MPI interface" << G4endl;
  G4cout << "usage:" << G4endl;
  G4cout << "<app> [options] [macro file]"
         << G4endl << G4endl;
  G4cout << "   -h, --help          show this message."
         << G4endl;
  G4cout << "   -v, --verbose       show verbose message"
         << G4endl;
  G4cout << "   -i, --init=FNAME    set an init macro file"
         << G4endl;
  G4cout << "   -o, --ofile[=FNAME] set slave output to a file"
         << G4endl;
  G4cout << G4endl;
}
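
For context, here is a minimal usage sketch showing how an application might drive G4MPImanager, based on the Geant4 MPI example programs. The GetMPIsession() accessor and the run-manager setup shown here are assumptions for illustration and are not part of this file.

#include "G4MPImanager.hh"
#include "G4MPIsession.hh"
#include "G4RunManager.hh"

int main(int argc, char** argv)
{
  // construct the MPI manager first: the constructor calls
  // MPI::Init_thread() and ParseArguments(), so MPI is ready
  // before anything else touches the command line
  G4MPImanager* g4MPI = new G4MPImanager(argc, argv);
  G4MPIsession* session = g4MPI-> GetMPIsession(); // assumed accessor

  G4RunManager* runManager = new G4RunManager;
  // ... register detector construction, physics list and user actions ...

  // interactive terminal on the master, or batch mode if a macro
  // file was passed on the command line
  session-> SessionStart();

  delete runManager;
  delete g4MPI; // the destructor calls MPI::Finalize()
  return 0;
}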