ESyS-Particle  2.3.4
pp_array.hpp
1 // //
3 // Copyright (c) 2003-2017 by The University of Queensland //
4 // Centre for Geoscience Computing //
5 // http://earth.uq.edu.au/centre-geoscience-computing //
6 // //
7 // Primary Business: Brisbane, Queensland, Australia //
8 // Licensed under the Open Software License version 3.0 //
9 // http://www.apache.org/licenses/LICENSE-2.0 //
10 // //
12 
14 #include "Foundation/console.h"
15 #include <iostream>
16 #include <sstream>
17 #include <stdexcept>
18 
19 #ifdef __INTEL_COMPILER
20 #include <mathimf.h>
21 #else
22 #include <math.h>
23 #endif
24 using std::cout;
25 using std::cerr;
26 using std::endl;
27 using std::flush;
28 
29 //--- TML includes ---
30 #include "ntable/src/nt_slab.h"
31 
32 //--- STL includes ---
33 #include <utility>
34 
35 using std::pair;
36 using std::make_pair;
37 
38 
39 
40 template<typename T>
42 
61 template<typename T>
62 ParallelParticleArray<T>::ParallelParticleArray(TML_Comm *comm, const std::vector<unsigned int> &dims,const Vec3& min, const Vec3& max ,double range, double alpha):AParallelParticleArray(comm, dims), m_nt(NULL)
63 {
64  //- set x-edges to non-circular
65  m_circ_edge_x_down=false;
66  m_circ_edge_x_up=false;
67  //- get own process coords
68  vector<int> pcoords=m_comm.get_coords();
69  //- initialize ntable
70  // get global number of cells
71  int nx_global,ny_global,nz_global;
72  nx_global=lrint((max[0]-min[0])/range);
73  ny_global=lrint((max[1]-min[1])/range);
74  nz_global=lrint((max[2]-min[2])/range);
75  if ((fabs((double(nx_global)-(max[0]-min[0])/range)) > 1e-6) ||
76  (fabs((double(ny_global)-(max[1]-min[1])/range)) > 1e-6) ||
77  (fabs((double(nz_global)-(max[2]-min[2])/range)) > 1e-6)){
78  m_nt=NULL;
79  std::stringstream msg;
80  msg << "size doesn't fit range" << endl; // replace by throw
81  msg << "diff x : " << double(nx_global)-(max[0]-min[0])/range << endl;
82  msg << "diff y : " << double(ny_global)-(max[1]-min[1])/range << endl;
83  msg << "diff z : " << double(nz_global)-(max[2]-min[2])/range << endl;
84  console.Error() << msg.str() << "\n";
85  throw std::runtime_error(msg.str());
86  } else {
87  // calc local min. cell, considering overlap
88  int nx_min=((nx_global*pcoords[0])/m_comm.get_dim(0))-1;
89  int ny_min=((ny_global*pcoords[1])/m_comm.get_dim(1))-1;
90  int nz_min=((nz_global*pcoords[2])/m_comm.get_dim(2))-1;
91  // calc local number of cells, considering overlap
92  int nx=(((nx_global*(pcoords[0]+1))/m_comm.get_dim(0))-nx_min)+1;
93  int ny=(((ny_global*(pcoords[1]+1))/m_comm.get_dim(1))-ny_min)+1;
94  int nz=(((nz_global*(pcoords[2]+1))/m_comm.get_dim(2))-nz_min)+1;
95  // init
96  m_nt=new NeighborTable<T>(nx,ny,nz,range,alpha,min,nx_min,ny_min,nz_min);
97  }
98  // init time stamp
99  m_timestamp=0;
100 }
101 
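The integer arithmetic above gives each process a contiguous block of the global cell grid plus one ghost (overlap) cell on either side. A minimal standalone sketch of that decomposition, using assumed example values (16 global cells split over 4 processes along x) in place of the values obtained from TML_Comm:

    #include <iostream>

    int main()
    {
      const int nx_global = 16;  // global cell count in x (assumed example value)
      const int ndims     = 4;   // number of processes along x (assumed example value)

      for (int pcoord = 0; pcoord < ndims; ++pcoord) {
        // lowest cell held by this process, shifted down by one ghost cell
        int nx_min = ((nx_global * pcoord) / ndims) - 1;
        // local cell count, including one ghost cell at each end
        int nx = (((nx_global * (pcoord + 1)) / ndims) - nx_min) + 1;
        std::cout << "process " << pcoord
                  << ": first cell " << nx_min
                  << ", " << nx << " cells (incl. ghost layers)\n";
      }
      return 0;
    }

With these numbers each process owns four cells and stores six; the two extra cells are the shared boundary slabs that rebuild() later exchanges.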
123 template<typename T>
124 ParallelParticleArray<T>::ParallelParticleArray(TML_Comm *comm, const vector<unsigned int> &dims, const vector<bool> &circ,const Vec3 &min,const Vec3 &max, double range, double alpha):AParallelParticleArray(comm,dims,circ), m_nt(NULL)
125 {
126  //- get own process coords
127  vector<int> pcoords=m_comm.get_coords();
128  //- initialize ntable
129  // get global number of cells
130  int nx_global,ny_global,nz_global;
131  nx_global=lrint((max[0]-min[0])/range);
132  ny_global=lrint((max[1]-min[1])/range);
133  nz_global=lrint((max[2]-min[2])/range);
134  if ((fabs((double(nx_global)-(max[0]-min[0])/range)) > 1e-6) ||
135  (fabs((double(ny_global)-(max[1]-min[1])/range)) > 1e-6) ||
136  (fabs((double(nz_global)-(max[2]-min[2])/range)) > 1e-6)){
137  m_nt=NULL;
138  std::stringstream msg;
139  msg << "size doesn't fit range" << endl; // replace by throw
140  msg << "diff x : " << double(nx_global)-(max[0]-min[0])/range << endl;
141  msg << "diff y : " << double(ny_global)-(max[1]-min[1])/range << endl;
142  msg << "diff z : " << double(nz_global)-(max[2]-min[2])/range << endl;
143  throw std::runtime_error(msg.str());
144  } else {
145  // calc local min. cell, considering overlap
146  int nx_min=((nx_global*pcoords[0])/m_comm.get_dim(0))-1;
147  int ny_min=((ny_global*pcoords[1])/m_comm.get_dim(1))-1;
148  int nz_min=((nz_global*pcoords[2])/m_comm.get_dim(2))-1;
149  // calc local number of cells, considering overlap
150  int nx=(((nx_global*(pcoords[0]+1))/m_comm.get_dim(0))-nx_min)+1;
151  int ny=(((ny_global*(pcoords[1]+1))/m_comm.get_dim(1))-ny_min)+1;
152  int nz=(((nz_global*(pcoords[2]+1))/m_comm.get_dim(2))-nz_min)+1;
153  // init local neighbortable
154  m_nt=new NeighborTable<T>(nx,ny,nz,range,alpha,min,nx_min,ny_min,nz_min);
155  // setup circular shift values
156  m_xshift=(max-min).X();
157  m_yshift=(max-min).Y();
158  m_zshift=(max-min).Z();
159  // setup circular edges
160  m_circ_edge_x_down=(circ[0] && (pcoords[0]==0)) ? true : false ;
161  m_circ_edge_x_up=(circ[0] && (pcoords[0]==m_comm.get_dim(0)-1)) ? true : false ;
162  }
163  // init time stamp
164  m_timestamp=0;
165 }
166 
170 template<typename T>
171 ParallelParticleArray<T>::~ParallelParticleArray()
172 {
173  if(m_nt!=NULL) delete m_nt;
174 }
175 
181 template<typename T>
182 void ParallelParticleArray<T>::insert(const T& p)
183 {
184  m_nt->insert(p);
185 }
186 
192 template<typename T>
193 void ParallelParticleArray<T>::insert(const vector<T>& vp)
194 {
195  for(typename vector<T>::const_iterator iter=vp.begin();
196  iter!=vp.end();
197  iter++){
198  m_nt->insert(*iter);
199  }
200 }
201 
207 template<typename T>
208 bool ParallelParticleArray<T>::isInInner(const Vec3& pos)
209 {
210  return m_nt->isInInner(pos);
211 }
212 
219 template<typename T>
220 T* ParallelParticleArray<T>::getParticlePtrByIndex(int id)
221 {
222  return m_nt->ptr_by_id(id);
223 }
224 
231 template<typename T>
232 T* ParallelParticleArray<T>::getParticlePtrByPosition(const Vec3& pos)
233 {
234  return m_nt->getNearestPtr(pos);
235 }
241 template<typename T>
242 void ParallelParticleArray<T>::rebuild()
243 {
244  // cout << "PPA at node " << m_comm.rank() << "rebuilding " << endl << flush;
245  //- get own process coords (for debug output)
246  vector<int> pcoords=m_comm.get_coords();
247 
248  // rebuild locally
249  // cout << "node " << m_comm.rank() << " pre-build " << *m_nt << endl;
250  m_nt->build();
251  // cout << "node " << m_comm.rank() << " pre-exchange " << *m_nt << endl;
252  //- exchange boundary particles
253  NTSlab<T> up_slab;
254  NTSlab<T> down_slab;
255  vector<T> recv_data;
256  bool circ_edge=false;
257 
258  // x-dir (there is at least one dimension)
259  if(m_comm.get_dim(0)>1){
260  // clean out boundary slabs
261  NTSlab<T> up_boundary_slab=m_nt->yz_slab(m_nt->xsize()-1);
262  up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
263  NTSlab<T> down_boundary_slab=m_nt->yz_slab(0);
264  down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());
265 
266  // synchronize
267  m_comm.barrier();
268 
269  // define transfer slabs
270  up_slab=m_nt->yz_slab(m_nt->xsize()-2);
271  down_slab=m_nt->yz_slab(1);
272 
273  // upstream
274  if(m_circ_edge_x_up){ // circ. bdry
275  // cout << "circular shift, node " << m_comm.rank() << " x-dim, up slab size " << up_slab.size() << endl;
276  // copy particles from transfer slab into buffer
277  vector<T> buffer;
278  for(typename NTSlab<T>::iterator iter=up_slab.begin();
279  iter!=up_slab.end();
280  iter++){
281  buffer.push_back(*iter);
282  }
283  // shift particles in buffer by circular shift value
284  for(typename vector<T>::iterator iter=buffer.begin();
285  iter!=buffer.end();
286  iter++){
287  iter->setCircular(Vec3(-1.0*m_xshift,0.0,0.0));
288  }
289  m_comm.shift_cont_packed(buffer,*m_nt,0,1,m_exchg_tag);
290  } else {
291  m_comm.shift_cont_packed(up_slab,*m_nt,0,1,m_exchg_tag);
292  }
293  // downstream
294  if(m_circ_edge_x_down){// circ. bdry
295  // cout << "circular shift , node " << m_comm.rank() << " x-dim, down slab size " << down_slab.size() << endl;
296  // copy particles from transfer slab into buffer
297  vector<T> buffer;
298  for(typename NTSlab<T>::iterator iter=down_slab.begin();
299  iter!=down_slab.end();
300  iter++){
301  buffer.push_back(*iter);
302  }
303  // shift particles in buffer by circular shift value
304  for(typename vector<T>::iterator iter=buffer.begin();
305  iter!=buffer.end();
306  iter++){
307  iter->setCircular(Vec3(m_xshift,0.0,0.0));
308  }
309  m_comm.shift_cont_packed(buffer,*m_nt,0,-1,m_exchg_tag);
310  } else {
311  m_comm.shift_cont_packed(down_slab,*m_nt,0,-1,m_exchg_tag);
312  }
313  }
314  // y-dir
315  if(m_comm.get_dim(1)>1){
316  // clean out boundary slabs
317  NTSlab<T> up_boundary_slab=m_nt->xz_slab(m_nt->ysize()-1);
318  up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
319  NTSlab<T> down_boundary_slab=m_nt->xz_slab(0);
320  down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());
321 
322  // synchronize
323  m_comm.barrier();
324 
325  // define transfer slabs
326  up_slab=m_nt->xz_slab(m_nt->ysize()-2);
327  down_slab=m_nt->xz_slab(1);
328  if(!circ_edge){ // normal boundary
329  // upstream
330  m_comm.shift_cont_packed(up_slab,*m_nt,1,1,m_exchg_tag);
331  // downstream
332  m_comm.shift_cont_packed(down_slab,*m_nt,1,-1,m_exchg_tag);
333  } else { // circular edge -> buffer & shift received particles
334  cout << "circular y shift not implemented" << endl;
335  }
336  }
337  // z-dir
338  if(m_comm.get_dim(2)>1){
339  // cout << "zdim= " << m_comm.get_dim(2) << " shifting" << endl;
340  // clean out boundary slabs
341  NTSlab<T> up_boundary_slab=m_nt->xy_slab(m_nt->zsize()-1);
342  up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
343  NTSlab<T> down_boundary_slab=m_nt->xy_slab(0);
344  down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());
345 
346  // define transfer slabs
347  up_slab=m_nt->xy_slab(m_nt->zsize()-2);
348  down_slab=m_nt->xy_slab(1);
349  if(!circ_edge){ // normal boundary
350  // upstream
351  m_comm.shift_cont_packed(up_slab,*m_nt,2,1,m_exchg_tag);
352  // downstream
353  m_comm.shift_cont_packed(down_slab,*m_nt,2,-1,m_exchg_tag);
354  } else { // circular edge -> buffer & shift received particles
355  cout << "circular z shift not implemented" << endl;
356  }
357  }
358  // update timestamp
359  m_timestamp++;
360  // debug
361  // cout << "node " << m_comm.rank() << "post-exchange " << *m_nt << flush;
362 }
363 
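For a circular (periodic) x boundary, rebuild() copies the outgoing boundary particles into a buffer and translates them by the full domain width (m_xshift) before shipping them, so their coordinates are meaningful on the receiving edge. A minimal sketch of that wrap, assuming setCircular() simply offsets the particle position by the given amount; ToyParticle below is a stand-in, not the library's particle type:

    #include <iostream>
    #include <vector>

    struct ToyParticle {                          // stand-in for the particle type T
      double x;
      void setCircular(double dx) { x += dx; }    // assumed: translate by the offset
    };

    int main()
    {
      const double xshift = 10.0;                 // domain width, i.e. (max - min).X()
      std::vector<ToyParticle> up_buffer = { {9.7}, {9.9} };

      // particles leaving through the upper x edge reappear near the lower edge
      for (ToyParticle &p : up_buffer) p.setCircular(-xshift);

      for (const ToyParticle &p : up_buffer)
        std::cout << p.x << "\n";                 // -0.3, -0.1
      return 0;
    }

Particles crossing the lower edge get the opposite sign, +m_xshift, as in the downstream branch above.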
371 template<typename T>
372 template<typename P>
373 void ParallelParticleArray<T>::exchange(P (T::*rdf)(),void (T::*wrtf)(const P&))
374 {
375  // x-dir (there is at least one dimension)
376  if(m_comm.get_dim(0)>1){
377  // cout << "xdim= " << m_comm.get_dim(0) << " exchanging" << endl;
378  // do transfer
379  exchange_single(rdf,wrtf,m_nt->yz_slab(m_nt->xsize()-2),m_nt->yz_slab(0),0,1);
380  // downstream
381  exchange_single(rdf,wrtf,m_nt->yz_slab(1),m_nt->yz_slab(m_nt->xsize()-1),0,-1);
382  }
383  // y-dir
384  if(m_comm.get_dim(1)>1){
385  // cout << "ydim= " << m_comm.get_dim(1) << " shifting" << endl;
386  // upstream
387  exchange_single(rdf,wrtf,m_nt->xz_slab(m_nt->ysize()-2),m_nt->xz_slab(0),1,1);
388  // downstream
389  exchange_single(rdf,wrtf,m_nt->xz_slab(1),m_nt->xz_slab(m_nt->ysize()-1),1,-1);
390  }
391  // z-dir
392  if(m_comm.get_dim(2)>1){
393  // cout << "zdim= " << m_comm.get_dim(2) << " shifting" << endl;
394  // upstream
395  exchange_single(rdf,wrtf,m_nt->xy_slab(m_nt->zsize()-2),m_nt->xy_slab(0),2,1);
396  // downstream
397  exchange_single(rdf,wrtf,m_nt->xy_slab(1),m_nt->xy_slab(m_nt->zsize()-1),2,-1);
398  }
399 }
400 
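exchange() moves per-particle field values rather than whole particles: a read member function (rdf) collects values from each send slab and a write member function (wrtf) applies the received values to the matching ghost slab, in slab order. A self-contained sketch of that member-pointer pattern, with a toy particle type standing in for T and a plain buffer copy standing in for TML_Comm::shift_cont_packed():

    #include <iostream>
    #include <vector>

    struct ToyParticle {                       // stand-in for the particle type T
      double force = 0.0;
      double getForce() { return force; }
      void   setForce(const double &f) { force = f; }
    };

    // collect values via rdf, "transfer" them, then apply via wrtf -- the same
    // read/write pattern exchange_single() uses across process boundaries
    template<typename T, typename P>
    void exchange_demo(std::vector<T> &send_slab, std::vector<T> &recv_slab,
                       P (T::*rdf)(), void (T::*wrtf)(const P &))
    {
      std::vector<P> buffer;
      for (T &p : send_slab) buffer.push_back((p.*rdf)());

      typename std::vector<P>::const_iterator src = buffer.begin();
      for (typename std::vector<T>::iterator dst = recv_slab.begin();
           dst != recv_slab.end() && src != buffer.end(); ++dst, ++src) {
        ((*dst).*wrtf)(*src);
      }
    }

    int main()
    {
      std::vector<ToyParticle> send = { {1.5}, {2.5} };
      std::vector<ToyParticle> recv(2);
      exchange_demo(send, recv, &ToyParticle::getForce, &ToyParticle::setForce);
      std::cout << recv[0].force << " " << recv[1].force << "\n";  // 1.5 2.5
      return 0;
    }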
411 template<typename T>
412 template<typename P>
413 void ParallelParticleArray<T>::exchange_single(P (T::*rdf)(),void (T::*wrtf)(const P&),
414  NTSlab<T> send_slab,NTSlab<T> recv_slab,
415  int dir,int dist)
416 {
417  vector<P> send_data;
418  vector<P> recv_data;
419 
420  // get data
421  for(typename NTSlab<T>::iterator iter=send_slab.begin();
422  iter!=send_slab.end();
423  iter++){
424  send_data.push_back(((*iter).*rdf)());
425 // cout << "put exchange values from particle " << iter->getID() << " into buffer" << endl;
426  }
427  // exchange
428  m_comm.shift_cont_packed(send_data,recv_data,dir,dist,m_exchg_tag);
429 
430  // apply received data
431  // check if sizes are correct
432  if(recv_data.size()==recv_slab.size()){
433  int count=0;
434  for(typename NTSlab<T>::iterator iter=recv_slab.begin();
435  iter!=recv_slab.end();
436  iter++){
437  ((*iter).*wrtf)(recv_data[count]);
438 // cout << "wrote exchange value to particle " << iter->getID() << endl;
439  count++;
440  }
441  }else{
442  cerr << "rank = " << m_comm.rank() << ":ParallelParticleArray<T>::exchange_single ERROR: size mismatch in received data, recv_data.size()!=recv_slab.size() - (" << recv_data.size() << "!=" << recv_slab.size() << "). dir = " << dir << ", dist = " << dist << endl;
443  }
444 }
445 
455 template<typename T>
456 void ParallelParticleArray<T>::forParticle(int id,void (T::*mf)())
457 {
458  T* pp=m_nt->ptr_by_id(id);
459  if(pp!=NULL){
460  (pp->*mf)();
461  }
462 }
463 
474 template<typename T>
475 template<typename P>
476 void ParallelParticleArray<T>::forParticle(int id,void (T::*mf)(P),const P& arg)
477 {
478  T* pp=m_nt->ptr_by_id(id);
479  if(pp!=NULL){
480  (pp->*mf)(arg);
481  }
482 }
483 
490 template<typename T>
491 void ParallelParticleArray<T>::forParticleTag(int tag,void (T::*mf)())
492 {
493  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
494  iter!=m_nt->end();
495  iter++){
496  if(iter->getTag()==tag){
497  ((*iter).*mf)();
498  }
499  }
500 }
501 
509 template<typename T>
510 template<typename P>
511 void ParallelParticleArray<T>::forParticleTag(int tag,void (T::*mf)(P),const P& arg)
512 {
513  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
514  iter!=m_nt->end();
515  iter++){
516  if(iter->getTag()==tag){
517  ((*iter).*mf)(arg);
518  }
519  }
520 }
521 
531 template<typename T>
532 void ParallelParticleArray<T>::forParticleTagMask(int tag,int mask, void (T::*mf)())
533 {
534  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
535  iter!=m_nt->end();
536  iter++){
537  if((iter->getTag() & mask) == (tag & mask)){
538  ((*iter).*mf)();
539  }
540  }
541 }
542 
553 template<typename T>
554 template<typename P>
555 void ParallelParticleArray<T>::forParticleTagMask(int tag,int mask,void (T::*mf)(P),const P& arg)
556 {
557  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
558  iter!=m_nt->end();
559  iter++){
560  if((iter->getTag() & mask) == (tag & mask)){
561  ((*iter).*mf)(arg);
562  }
563  }
564 }
565 
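The tag/mask variants above apply the member function only to particles whose tag agrees with the given tag on the bits selected by the mask, i.e. (particleTag & mask) == (tag & mask). A small standalone illustration of that test:

    #include <iostream>

    // true if ptag matches tag on every bit selected by mask
    bool tagMatches(int ptag, int tag, int mask) { return (ptag & mask) == (tag & mask); }

    int main()
    {
      std::cout << std::boolalpha;
      // with mask 0x0F only the low four bits are compared
      std::cout << tagMatches(0x12, 0x02, 0x0F) << "\n";  // true:  low nibbles agree
      std::cout << tagMatches(0x13, 0x02, 0x0F) << "\n";  // false: low nibbles differ
      return 0;
    }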
569 template<typename T>
570 void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)())
571 {
572  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
573  iter!=m_nt->end();
574  iter++){
575  ((*iter).*fnc)();
576  }
577 }
578 
582 template<typename T>
583 void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)()const)
584 {
585  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
586  iter!=m_nt->end();
587  iter++){
588  ((*iter).*fnc)();
589  }
590 }
591 
598 template<typename T>
599 template<typename P>
600 void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)(P),const P& arg)
601 {
602  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
603  iter!=m_nt->end();
604  iter++){
605  ((*iter).*fnc)(arg);
606  }
607 }
608 
615 template<typename T>
616 template<typename P>
617 void ParallelParticleArray<T>::forAllInnerParticles(void (T::*fnc)(P&),P& arg)
618 {
619 
620  NTBlock<T> InnerBlock=m_nt->inner();
621  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
622  iter!=InnerBlock.end();
623  iter++){
624  ((*iter).*fnc)(arg);
625  }
626 }
627 
640 template<typename T>
641 template<typename P>
642 void ParallelParticleArray<T>::forAllParticlesGet(P& cont,typename P::value_type (T::*rdf)()const)
643 {
644  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
645  iter!=m_nt->end();
646  iter++){
647  cont.push_back(((*iter).*rdf)());
648  }
649 }
650 
651 template<typename T>
652 ParallelParticleArray<T>::ParticleIterator::ParticleIterator(
653  const NtBlock &ntBlock
654 )
655  : m_ntBlock(ntBlock),
656  m_it(m_ntBlock.begin())
657 {
658  m_it = m_ntBlock.begin();
659  m_numRemaining = m_ntBlock.size();
660 }
661 
662 template<typename T>
663 bool ParallelParticleArray<T>::ParticleIterator::hasNext() const
664 {
665  return (m_numRemaining > 0);
666 }
667 
668 template<typename T>
669 typename ParallelParticleArray<T>::ParticleIterator::Particle& ParallelParticleArray<T>::ParticleIterator::next()
670 {
671  Particle &p = (*m_it);
672  m_it++;
673  m_numRemaining--;
674  return p;
675 }
676 
677 template<typename T>
678 int ParallelParticleArray<T>::ParticleIterator::getNumRemaining() const
679 {
680  return m_numRemaining;
681 }
682 
683 template<typename T>
684 typename ParallelParticleArray<T>::ParticleIterator ParallelParticleArray<T>::getInnerParticleIterator()
685 {
686  return ParticleIterator(m_nt->inner());
687 }
688 
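ParticleIterator follows a plain hasNext()/next() protocol over the inner block. The toy class below mirrors that protocol on a std::vector so the control flow can be seen in isolation; it is a stand-in, not the library iterator (which walks an NTBlock<T> obtained from getInnerParticleIterator()):

    #include <iostream>
    #include <vector>

    // toy container exposing the same hasNext()/next()/getNumRemaining() protocol
    class ToyIterator {
    public:
      explicit ToyIterator(std::vector<double> &v)
        : m_it(v.begin()), m_numRemaining(static_cast<int>(v.size())) {}
      bool hasNext() const { return m_numRemaining > 0; }
      double &next() { double &p = *m_it; ++m_it; --m_numRemaining; return p; }
      int getNumRemaining() const { return m_numRemaining; }
    private:
      std::vector<double>::iterator m_it;
      int m_numRemaining;
    };

    int main()
    {
      std::vector<double> radii = {1.0, 2.0, 3.0};
      ToyIterator it(radii);
      while (it.hasNext()) {
        double &r = it.next();
        std::cout << r << " (" << it.getNumRemaining() << " left)\n";
      }
      return 0;
    }

Against a real array the loop looks the same: obtain the iterator from getInnerParticleIterator() and call next() while hasNext() returns true.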
696 template<typename T>
697 template<typename P>
698 void ParallelParticleArray<T>::forAllInnerParticlesGet(P& cont,typename P::value_type (T::*rdf)()const)
699 {
700  NTBlock<T> InnerBlock=m_nt->inner();
701  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
702  iter!=InnerBlock.end();
703  iter++){
704  cont.push_back(((*iter).*rdf)());
705  }
706 }
707 
714 template<typename T>
715 template <typename P>
716 vector<pair<int,P> > ParallelParticleArray<T>::forAllParticlesGetIndexed(P (T::*rdf)() const)
717 {
718  vector<pair<int,P> > res;
719 
720  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
721  iter!=m_nt->end();
722  iter++){
723  res.push_back(make_pair(iter->getID(),((*iter).*rdf)()));
724  }
725 
726  return res;
727 }
728 
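forAllParticlesGetIndexed() gathers one (particle id, value) pair per particle through a const read member function. A self-contained sketch of the same gather pattern over a plain vector, with a toy particle type; the getRad() accessor here is only an example, not something the particle type T is guaranteed to provide:

    #include <iostream>
    #include <utility>
    #include <vector>

    struct ToyParticle {                          // stand-in for the particle type T
      int id; double rad;
      int getID() const { return id; }
      double getRad() const { return rad; }
    };

    // gather (id, value) pairs using a const read member function, the same
    // pattern forAllParticlesGetIndexed() applies over the neighbor table
    template<typename T, typename P>
    std::vector<std::pair<int, P> > getIndexed(std::vector<T> &particles, P (T::*rdf)() const)
    {
      std::vector<std::pair<int, P> > res;
      for (typename std::vector<T>::iterator iter = particles.begin();
           iter != particles.end(); ++iter) {
        res.push_back(std::make_pair(iter->getID(), ((*iter).*rdf)()));
      }
      return res;
    }

    int main()
    {
      std::vector<ToyParticle> particles = { {7, 0.5}, {11, 0.25} };
      std::vector<std::pair<int, double> > radii = getIndexed(particles, &ToyParticle::getRad);
      for (const auto &pr : radii)
        std::cout << "id " << pr.first << " -> " << pr.second << "\n";
      return 0;
    }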
735 template<typename T>
736 template <typename P>
737 vector<pair<int,P> > ParallelParticleArray<T>::forAllInnerParticlesGetIndexed(P (T::*rdf)() const)
738 {
739  vector<pair<int,P> > res;
740 
741  NTBlock<T> InnerBlock=m_nt->inner();
742  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
743  iter!=InnerBlock.end();
744  iter++){
745  res.push_back(make_pair(iter->getID(),((*iter).*rdf)()));
746  }
747 
748  return res;
749 }
750 
766 template<typename T>
767 template<typename P>
768 void ParallelParticleArray<T>::forAllTaggedParticlesGet(P& cont,typename P::value_type (T::*rdf)()const,int tag,int mask)
769 {
770  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
771  iter!=m_nt->end();
772  iter++){
773  if((iter->getTag() & mask )==(tag & mask)){
774  cont.push_back(((*iter).*rdf)());
775  }
776  }
777 }
778 
788 template<typename T>
789 template<typename P>
790 void ParallelParticleArray<T>::forAllTaggedInnerParticlesGet(P& cont,typename P::value_type (T::*rdf)()const,int tag,int mask)
791 {
792  NTBlock<T> InnerBlock=m_nt->inner();
793  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
794  iter!=InnerBlock.end();
795  iter++){
796  int ptag=iter->getTag();
797  if((ptag & mask )==(tag & mask)){
798  cont.push_back(((*iter).*rdf)());
799  }
800  }
801 }
802 
811 template<typename T>
812 template <typename P>
813 vector<pair<int,P> > ParallelParticleArray<T>::forAllTaggedParticlesGetIndexed(P (T::*rdf)() const,int tag,int mask)
814 {
815  vector<pair<int,P> > res;
816 
817  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
818  iter!=m_nt->end();
819  iter++){
820  int id=iter->getID();
821  int ptag=iter->getTag();
822  if(( ptag & mask )==(tag & mask)){
823  res.push_back(make_pair(id,((*iter).*rdf)()));
824  }
825  }
826 
827  return res;
828 }
829 
839 template<typename T>
840 template <typename P>
841 vector<pair<int,P> > ParallelParticleArray<T>::forAllInnerTaggedParticlesGetIndexed(P (T::*rdf)() const,int tag,int mask)
842 {
843  vector<pair<int,P> > res;
844 
845  NTBlock<T> InnerBlock=m_nt->inner();
846  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
847  iter!=InnerBlock.end();
848  iter++){
849  int id=iter->getID();
850  int ptag=iter->getTag();
851  if((ptag & mask )==(tag & mask)){
852  res.push_back(make_pair(id,((*iter).*rdf)()));
853  }
854  }
855 
856  return res;
857 }
858 
873 template<typename T>
874 template <typename P>
875 void ParallelParticleArray<T>::forPointsGetNearest(P& cont,typename P::value_type (T::*rdf)() const,const Vec3& orig,
876  double dx,double dy,double dz,int nx,int ny,int nz)
877 {
878  console.Debug() << "forPointsGetNearest" << "\n";
879 
880  Vec3 u_x=Vec3(1.0,0.0,0.0);
881  Vec3 u_y=Vec3(0.0,1.0,0.0);
882  Vec3 u_z=Vec3(0.0,0.0,1.0);
883  for(int ix=0;ix<nx;ix++){
884  for(int iy=0;iy<ny;iy++){
885  for(int iz=0;iz<nz;iz++){
886  Vec3 p=orig+double(ix)*dx*u_x+double(iy)*dy*u_y+double(iz)*dz*u_z;
887  cont.push_back(((*(m_nt->getNearestPtr(p))).*rdf)());
888  }
889  }
890  }
891 
892  console.Debug() << "end forPointsGetNearest" << "\n";
893 }
894 
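forPointsGetNearest() samples one value per grid point, appending to cont with iz varying fastest, then iy, then ix. A standalone sketch of how the nx*ny*nz sample points are laid out (example origin, spacings, and counts; the real call then looks up the nearest particle at each point):

    #include <iostream>

    int main()
    {
      const double ox = 0.0, oy = 0.0, oz = 0.0;   // origin (example values)
      const double dx = 1.0, dy = 1.0, dz = 1.0;   // grid spacings (example values)
      const int nx = 2, ny = 2, nz = 2;            // grid dimensions (example values)

      // same loop nesting as above: z is the innermost (fastest) index
      for (int ix = 0; ix < nx; ix++)
        for (int iy = 0; iy < ny; iy++)
          for (int iz = 0; iz < nz; iz++)
            std::cout << "sample point ("
                      << ox + ix * dx << ", "
                      << oy + iy * dy << ", "
                      << oz + iz * dz << ")\n";
      return 0;
    }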
901 template<typename T>
902 set<int> ParallelParticleArray<T>::getBoundarySlabIds(int dir,int up) const
903 {
904  set<int> res;
905  NTSlab<T> slab,slab2;
906 
907  switch(dir){
908  case 0 : // x-dir
909  if(up==1){
910  slab=m_nt->yz_slab(m_nt->xsize()-1);
911  slab2=m_nt->yz_slab(m_nt->xsize()-2);
912  } else {
913  slab=m_nt->yz_slab(0);
914  slab2=m_nt->yz_slab(1);
915  }
916  break;
917  case 1 : // y-dir
918  if(up==1){
919  slab=m_nt->xz_slab(m_nt->ysize()-1);
920  slab2=m_nt->xz_slab(m_nt->ysize()-2);
921  } else {
922  slab=m_nt->xz_slab(0);
923  slab2=m_nt->xz_slab(1);
924  }
925  break;
926  case 2 : // z-dir
927  if(up==1){
928  slab=m_nt->xy_slab(m_nt->zsize()-1);
929  slab2=m_nt->xy_slab(m_nt->zsize()-2);
930  } else {
931  slab=m_nt->xy_slab(0);
932  slab2=m_nt->xy_slab(1);
933  }
934  break;
935  default:
936  cout << "Error: wrong direction " << dir << " in getBoundarySlabIds" << endl;
937  }
938 
939  for(typename NTSlab<T>::iterator iter=slab.begin();
940  iter!=slab.end();
941  iter++){
942  res.insert(iter->getID());
943  }
944  for(typename NTSlab<T>::iterator iter=slab2.begin();
945  iter!=slab2.end();
946  iter++){
947  res.insert(iter->getID());
948  }
949 
950  return res;
951 }
952 
959 template<typename T>
960 set<int> ParallelParticleArray<T>::get2ndSlabIds(int dir,int up) const
961 {
962  set<int> res;
963  NTSlab<T> slab;
964 
965  switch(dir){
966  case 0 : // x-dir
967  if(up==1){
968  slab=m_nt->yz_slab(m_nt->xsize()-2);
969  } else {
970  slab=m_nt->yz_slab(1);
971  }
972  break;
973  case 1 : // y-dir
974  if(up==1){
975  slab=m_nt->xz_slab(m_nt->ysize()-2);
976  } else {
977  slab=m_nt->xz_slab(1);
978  }
979  break;
980  case 2 : // z-dir
981  if(up==1){
982  slab=m_nt->xy_slab(m_nt->zsize()-2);
983  } else {
984  slab=m_nt->xy_slab(1);
985  }
986  break;
987  default:
988  cout << "Error: wrong direction " << dir << " in get2ndSlabIds" << endl;
989  }
990 
991  for(typename NTSlab<T>::iterator iter=slab.begin();
992  iter!=slab.end();
993  iter++){
994  res.insert(iter->getID());
995  }
996 
997  return res;
998 }
999 
1005 template<typename T>
1006 void ParallelParticleArray<T>::getAllInnerParticles(vector<T>& pv)
1007 {
1008  NTBlock<T> InnerBlock=m_nt->inner();
1009  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
1010  iter!=InnerBlock.end();
1011  iter++){
1012  pv.push_back(*iter);
1013  }
1014 }
1015 
1016 
1022 template<typename T>
1023 void ParallelParticleArray<T>::saveCheckPointData(std::ostream& ost)
1024 {
1025  console.Debug() << "ParallelParticleArray<T>::saveCheckPointData\n";
1026 
1027  // output dimensions
1028  ost << m_nt->base_point() << "\n";
1029  ost << m_nt->base_idx_x() << " " << m_nt->base_idx_y() << " " << m_nt->base_idx_z() << "\n";
1030 
1031  // get nr. of particles in inner block
1032  ost << getInnerSize() << "\n";
1033 
1034  // save particles to stream
1035  NTBlock<T> InnerBlock=m_nt->inner();
1036  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
1037  iter!=InnerBlock.end();
1038  iter++){
1039  iter->saveCheckPointData(ost);
1040  ost << "\n";
1041  }
1042 }
1043 
1049 template<typename T>
1050 void ParallelParticleArray<T>::loadCheckPointData(std::istream& ist)
1051 {
1052  console.Debug() << "ParallelParticleArray<T>::loadCheckPointData\n";
1053  int nparts;
1054 
1055  // get dimensions
1056  Vec3 bp;
1057  int bix,biy,biz;
1058  ist >> bp;
1059  ist >> bix;
1060  ist >> biy;
1061  ist >> biz;
1062  // barf if different from current values
1063  if((bp!=m_nt->base_point()) ||
1064  (bix!=m_nt->base_idx_x()) ||
1065  (biy!=m_nt->base_idx_y()) ||
1066  (biz!=m_nt->base_idx_z())){
1067  std::cerr << "local area data inconsistent: bp " << bp << " / " << m_nt->base_point()
1068  << " bix: " << bix << " / " << m_nt->base_idx_x()
1069  << " biy: " << biy << " / " << m_nt->base_idx_y()
1070  << " biz: " << biz << " / " << m_nt->base_idx_z() << std::endl;
1071  }
1072 
1073  // get nr. of particles
1074  ist >> nparts;
1075 
1076  // load particles from stream and try to insert them
1077  for(int i=0;i!=nparts;i++){
1078  T p;
1079  p.loadCheckPointData(ist);
1080  if(!isInInner(p.getPos())) std::cerr << "not in inner: " << p.getPos() << std::endl;
1081  m_nt->insert(p);
1082  }
1083 }
1084 
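saveCheckPointData() and loadCheckPointData() agree on a simple text layout: the base point, the three base cell indices, the inner particle count, then one particle record per line. The self-contained round trip below mirrors that layout with a toy particle type; the single-line "x y z" form used for the base point is an assumption standing in for the library's Vec3 stream output:

    #include <iostream>
    #include <sstream>

    // toy particle illustrating one record of the checkpoint stream
    struct ToyParticle {
      double x = 0.0, rad = 0.0;
      void saveCheckPointData(std::ostream &ost) { ost << x << " " << rad; }
      void loadCheckPointData(std::istream &ist) { ist >> x >> rad; }
    };

    int main()
    {
      std::stringstream buf;

      // save: header, count, particle records (mirrors saveCheckPointData above)
      buf << "0 0 0" << "\n";                    // base point (stand-in for Vec3 output)
      buf << 0 << " " << 0 << " " << 0 << "\n";  // base cell indices
      buf << 2 << "\n";                          // number of inner particles
      ToyParticle a{1.0, 0.5}, b{2.0, 0.25};
      a.saveCheckPointData(buf); buf << "\n";
      b.saveCheckPointData(buf); buf << "\n";

      // load: read header and count, then reconstruct particles (mirrors loadCheckPointData)
      double bx, by, bz; int bix, biy, biz, nparts;
      buf >> bx >> by >> bz >> bix >> biy >> biz >> nparts;
      for (int i = 0; i != nparts; ++i) {
        ToyParticle p;
        p.loadCheckPointData(buf);
        std::cout << "particle " << i << ": x=" << p.x << " rad=" << p.rad << "\n";
      }
      return 0;
    }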
1085 
1092 template<typename T>
1093 ostream& operator<<(ostream& ost, const ParallelParticleArray<T> &ppa)
1094 {
1095  ost << "--ParallelParticleArray--" << endl;
1096  ost << *(ppa.m_nt) << endl;
1097  return ost;
1098 }