MPI: added an additional line at the upper MPI-bound for correct material averaging

This also fixes the current send and receive issue
--> Engine_MPI::SendReceiveCurrents()
Thorsten Liebig 2011-03-15 09:41:29 +01:00
parent edb40489d7
commit 005eb3a4f6
5 changed files with 30 additions and 33 deletions

@@ -57,11 +57,11 @@ void Engine_MPI::Init()
if (m_Op_MPI->m_NeighborDown[n]>=0)
{
- m_BufferDown[n] = new float[m_BufferSize[n]*3];
+ m_BufferDown[n] = new float[m_BufferSize[n]*2];
}
if (m_Op_MPI->m_NeighborUp[n]>=0)
{
- m_BufferUp[n] = new float[m_BufferSize[n]*3];
+ m_BufferUp[n] = new float[m_BufferSize[n]*2];
}
}
}
@@ -100,7 +100,7 @@ void Engine_MPI::SendReceiveVoltages()
//send voltages
unsigned int iPos=0;
- pos[n]=numLines[n]-1;
+ pos[n]=numLines[n]-2;
if (m_Op_MPI->m_NeighborUp[n]>=0)
{
for (pos[nP]=0; pos[nP]<numLines[nP]; ++pos[nP])
@@ -136,14 +136,6 @@ void Engine_MPI::SendReceiveVoltages()
void Engine_MPI::SendReceiveCurrents()
{
- /*
- TODO:
- the FDTD engine could update the normal currents (e.g. i_z at z_max) (magnetic fields) on the last line, but the default engine does not need to...
- workaround: transfer all three current components
- drawback: data transfer is larger than necessary
- future fix: update normal currents
- */
if (!m_Op_MPI->GetMPIEnabled())
return;
@@ -152,7 +144,7 @@ void Engine_MPI::SendReceiveCurrents()
//non-blocking prepare for receive...
for (int n=0;n<3;++n)
if (m_Op_MPI->m_NeighborUp[n]>=0)
- MPI_Irecv( m_BufferUp[n] , m_BufferSize[n]*3, MPI_FLOAT, m_Op_MPI->m_NeighborUp[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Recv_Request[n]);
+ MPI_Irecv( m_BufferUp[n] , m_BufferSize[n]*2, MPI_FLOAT, m_Op_MPI->m_NeighborUp[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Recv_Request[n]);
for (int n=0;n<3;++n)
{
@@ -168,16 +160,15 @@ void Engine_MPI::SendReceiveCurrents()
{
for (pos[nPP]=0; pos[nPP]<numLines[nPP]; ++pos[nPP])
{
- m_BufferDown[n][iPos++] = Engine_SSE_Compressed::GetCurr(n ,pos);
m_BufferDown[n][iPos++] = Engine_SSE_Compressed::GetCurr(nP ,pos);
m_BufferDown[n][iPos++] = Engine_SSE_Compressed::GetCurr(nPP,pos);
}
}
- MPI_Isend( m_BufferDown[n] , m_BufferSize[n]*3, MPI_FLOAT, m_Op_MPI->m_NeighborDown[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Send_Request[n]);
+ MPI_Isend( m_BufferDown[n] , m_BufferSize[n]*2, MPI_FLOAT, m_Op_MPI->m_NeighborDown[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Send_Request[n]);
}
//receive currents
- pos[n]=numLines[n]-1;
+ pos[n]=numLines[n]-2;
iPos=0;
if (m_Op_MPI->m_NeighborUp[n]>=0)
{
@@ -187,7 +178,6 @@ void Engine_MPI::SendReceiveCurrents()
{
for (pos[nPP]=0; pos[nPP]<numLines[nPP]; ++pos[nPP])
{
- Engine_SSE_Compressed::SetCurr(n ,pos,m_BufferUp[n][iPos++]);
Engine_SSE_Compressed::SetCurr(nP ,pos,m_BufferUp[n][iPos++]);
Engine_SSE_Compressed::SetCurr(nPP,pos,m_BufferUp[n][iPos++]);
}
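
With the additional line at the upper MPI bound, the normal current component no longer has to be transferred (the workaround described in the removed TODO): only the two tangential components nP and nPP are exchanged, so the Irecv/Isend buffers shrink from m_BufferSize[n]*3 to m_BufferSize[n]*2 floats, the GetCurr/SetCurr calls for the normal component n disappear, and the exchange plane shown above moves from numLines[n]-1 to numLines[n]-2. A minimal sketch of that packing, with CurrAccessor and PackTangentialCurrents as illustrative stand-ins rather than the actual openEMS interfaces:

#include <vector>

// Illustrative stand-in for the engine accessor: current component ny at position pos.
typedef float (*CurrAccessor)(int ny, const unsigned int pos[3]);

// Pack the two current components tangential to direction n on one mesh plane.
// The plane index is chosen by the caller (e.g. numLines[n]-2 on the receiving side above).
std::vector<float> PackTangentialCurrents(CurrAccessor GetCurrFn, int n, unsigned int plane,
                                          const unsigned int numLines[3])
{
	const int nP  = (n+1)%3;  // first tangential direction
	const int nPP = (n+2)%3;  // second tangential direction
	unsigned int pos[3] = {0,0,0};
	pos[n] = plane;

	std::vector<float> buffer;
	buffer.reserve((size_t)numLines[nP]*numLines[nPP]*2);
	for (pos[nP]=0; pos[nP]<numLines[nP]; ++pos[nP])
		for (pos[nPP]=0; pos[nPP]<numLines[nPP]; ++pos[nPP])
		{
			buffer.push_back(GetCurrFn(nP ,pos));  // tangential component 1
			buffer.push_back(GetCurrFn(nPP,pos));  // tangential component 2
		}
	return buffer;  // numLines[nP]*numLines[nPP]*2 floats, matching the new "*2" buffer size
}

The receiving side unpacks in the same order via SetCurr; the normal component no longer travels over MPI.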

@@ -239,17 +239,29 @@ bool openEMS_FDTD_MPI::SetupMPI(TiXmlElement* FDTD_Opts)
if (i>0)
m_MPI_Op->SetNeighborDown(0,procTable[i-1][j][k]);
if (i<SplitNumber[0].size()-2)
+ {
+ //add one additional line
+ grid->AddDiscLine(0, m_Original_Grid->GetLine(0,SplitNumber[0].at(i+1)+1 ));
m_MPI_Op->SetNeighborUp(0,procTable[i+1][j][k]);
+ }
if (j>0)
m_MPI_Op->SetNeighborDown(1,procTable[i][j-1][k]);
if (j<SplitNumber[1].size()-2)
+ {
+ //add one additional line
+ grid->AddDiscLine(1, m_Original_Grid->GetLine(1,SplitNumber[1].at(j+1)+1 ));
m_MPI_Op->SetNeighborUp(1,procTable[i][j+1][k]);
+ }
if (k>0)
m_MPI_Op->SetNeighborDown(2,procTable[i][j][k-1]);
if (k<SplitNumber[2].size()-2)
+ {
+ //add one additional line
+ grid->AddDiscLine(2, m_Original_Grid->GetLine(2,SplitNumber[2].at(k+1)+1 ));
m_MPI_Op->SetNeighborUp(2,procTable[i][j][k+1]);
+ }
}
}
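
The three blocks added above are the core of this commit: whenever a sub-domain has an upper neighbor in a direction, its grid gets one extra disc line taken from the original mesh just beyond the upper split position, so the operator can average the material properties across the MPI interface. A minimal sketch of the resulting overlap, assuming each rank covers the original lines between two consecutive split positions (illustrative code, not the CSRectGrid/openEMS_FDTD_MPI API):

#include <cstdio>
#include <vector>

int main()
{
	// Original mesh lines along one axis and the split positions (indices into origLines).
	std::vector<double> origLines = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};  // illustrative values
	std::vector<unsigned int> split = {0, 3, 6, 9};                  // rank r covers split[r]..split[r+1]

	for (unsigned int r = 0; r+1 < split.size(); ++r)
	{
		std::vector<double> sub(origLines.begin()+split[r], origLines.begin()+split[r+1]+1);
		if (r+2 < split.size())                      // this rank has an upper neighbor
			sub.push_back(origLines[split[r+1]+1]);  // the additional overlap line
		printf("rank %u: %u lines, %g .. %g\n", r, (unsigned int)sub.size(), sub.front(), sub.back());
	}
	return 0;
}

In this sketch each interior rank then shares two lines with its upper neighbor: the split line and the newly added line behind it.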

@@ -61,9 +61,9 @@ inline unsigned int Operator_Cylinder::GetNumberOfLines(int ny) const
{
//this is necessary for a correct field processing... cylindrical engine has to reset this by adding +1
if (CC_closedAlpha && ny==1)
- return numLines[1]-1;
+ return Operator_Multithread::GetNumberOfLines(ny)-1;
- return numLines[ny];
+ return Operator_Multithread::GetNumberOfLines(ny);
}
string Operator_Cylinder::GetDirName(int ny) const

@@ -77,20 +77,6 @@ void Operator_MPI::SetBoundaryCondition(int* BCs)
Operator_SSE_Compressed::SetBoundaryCondition(BCs);
}
- void Operator_MPI::ApplyElectricBC(bool* dirs)
- {
- if (!m_MPI_Enabled)
- return Operator_SSE_Compressed::ApplyElectricBC(dirs);
- for (int n=0;n<3;++n)
- {
- //do not delete operator at upper inteface
- if (m_NeighborUp[n]>=0)
- dirs[2*n+1] = false;
- }
- Operator_SSE_Compressed::ApplyElectricBC(dirs);
- }
Engine* Operator_MPI::CreateEngine() const
{
if (m_MPI_Enabled)
@@ -171,6 +157,14 @@ void Operator_MPI::SetOriginalMesh(CSRectGrid* orig_Mesh)
}
}
+ unsigned int Operator_MPI::GetNumberOfLines(int ny) const
+ {
+ if ((!m_MPI_Enabled) || (m_NeighborUp[ny]<0))
+ return Operator_SSE_Compressed::GetNumberOfLines(ny);
+ return Operator_SSE_Compressed::GetNumberOfLines(ny)-1;
+ }
string Operator_MPI::PrependRank(string name)
{
stringstream out_name;
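
The new GetNumberOfLines override is the counterpart to the extra line added in SetupMPI: when MPI is enabled and an upper neighbor exists in direction ny, the operator reports one line less, so the duplicated overlap line is hidden again from callers of the line count. A stand-alone restatement of that logic (illustrative only, not the openEMS class):

// Hide the duplicated overlap line from the reported line count whenever
// MPI is active and an upper neighbor exists in that direction.
unsigned int EffectiveNumberOfLines(unsigned int storedLines, bool mpiEnabled, int neighborUp)
{
	if (!mpiEnabled || neighborUp < 0)
		return storedLines;      // no upper neighbor: nothing to hide
	return storedLines - 1;      // drop the line shared with the upper neighbor
}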

@@ -31,7 +31,6 @@ public:
bool GetMPIEnabled() const {return m_MPI_Enabled;}
virtual void SetBoundaryCondition(int* BCs);
- virtual void ApplyElectricBC(bool* dirs);
virtual Engine* CreateEngine() const;
@@ -45,6 +44,8 @@ public:
virtual void SetSplitPos(int ny, unsigned int pos) {m_SplitPos[ny]=pos;}
virtual void SetOriginalMesh(CSRectGrid* orig_Mesh);
+ virtual unsigned int GetNumberOfLines(int ny) const;
protected:
Operator_MPI();
bool m_MPI_Enabled;