diff --git a/mrmd/io/DumpH5MDParallel.cpp b/mrmd/io/DumpH5MDParallel.cpp
index bd262005..73e8bb2f 100644
--- a/mrmd/io/DumpH5MDParallel.cpp
+++ b/mrmd/io/DumpH5MDParallel.cpp
@@ -39,11 +39,50 @@ class DumpH5MDParallelImpl
 public:
     explicit DumpH5MDParallelImpl(DumpH5MDParallel& config) : config_(config) {}
 
+    void open(const std::string& filename,
+              const data::Subdomain& subdomain,
+              const data::Atoms& atoms);
+    void dumpStep(const data::Subdomain& subdomain,
+                  const data::Atoms& atoms,
+                  const idx_t step,
+                  const real_t dt);
+    void close() const;
+
     void dump(const std::string& filename,
               const data::Subdomain& subdomain,
               const data::Atoms& atoms);
 
 private:
+    hid_t createFile(const std::string& filename, const hid_t& propertyList) const;
+    void closeFile(const hid_t& fileId) const;
+    hid_t createGroup(const hid_t& parentElementId, const std::string& groupName) const;
+    void closeGroup(const hid_t& groupId) const;
+    void openBox(const data::Subdomain& subdomain) const;
+    hid_t createChunkedDataset(const hid_t& groupId,
+                               const std::vector<hsize_t>& dims,
+                               const std::string& name,
+                               const hid_t& dtype) const;
+    void closeDataset(const hid_t& datasetId) const;
+
+    template <typename T>
+    void appendData(const hid_t datasetId,
+                    const std::vector<T>& data,
+                    const std::vector<hsize_t>& dims) const;
+    template <typename T>
+    void appendDataParallel(const hid_t datasetId,
+                            const std::vector<T>& data,
+                            const std::vector<hsize_t>& dims) const;
+    void appendEdges(const idx_t& step, const real_t& dt, const data::Subdomain& subdomain) const;
+    void appendCharges(const idx_t& step, const real_t& dt, const data::HostAtoms& atoms) const;
+    void appendForces(const idx_t& step, const real_t& dt, const data::HostAtoms& atoms) const;
+    void appendMasses(const idx_t& step, const real_t& dt, const data::HostAtoms& atoms) const;
+    void appendPositions(const idx_t& step, const real_t& dt, const data::HostAtoms& atoms) const;
+    void appendRelativeMasses(const idx_t& step,
+                              const real_t& dt,
+                              const data::HostAtoms& atoms) const;
+    void appendTypes(const idx_t& step, const real_t& dt, const data::HostAtoms& atoms) const;
+    void appendVelocities(const idx_t& step, const real_t& dt, const data::HostAtoms& atoms) const;
+
     void updateCache(const data::HostAtoms& atoms);
 
     void writeHeader(hid_t fileId) const;
@@ -71,6 +110,453 @@ class DumpH5MDParallelImpl
     int64_t particleOffset = -1;
 };
 
+void DumpH5MDParallelImpl::open(const std::string& filename,
+                                const data::Subdomain& subdomain,
+                                const data::Atoms& atoms)
+{
+    MPI_Info info = MPI_INFO_NULL;
+
+    auto propertyList = CHECK_HDF5(H5Pcreate(H5P_FILE_ACCESS));
+    CHECK_HDF5(H5Pset_fapl_mpio(propertyList, config_.mpiInfo->comm, info));
+
+    config_.fileId = createFile(filename, propertyList);
+
+    CHECK_HDF5(H5Pclose(propertyList));
+
+    config_.particleGroupId = createGroup(config_.fileId, "particles");
+    config_.particleSubGroupId = createGroup(config_.particleGroupId, config_.particleSubGroupName);
+    writeHeader(config_.fileId);
+    openBox(subdomain);
+
+    config_.chargesGroupId = createGroup(config_.particleSubGroupId, "charge");
+    config_.chargesStepSetId = createChunkedDataset(
+        config_.chargesGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.chargesTimeSetId = createChunkedDataset(
+        config_.chargesGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.chargesValueSetId = createChunkedDataset(config_.chargesGroupId,
+                                                     std::vector<hsize_t>{1, atoms.size(), 1},
+                                                     "value",
+                                                     H5T_NATIVE_DOUBLE);
+
+    config_.forceGroupId = createGroup(config_.particleSubGroupId, "force");
+    config_.forceStepSetId = createChunkedDataset(
+        config_.forceGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.forceTimeSetId = createChunkedDataset(
+        config_.forceGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.forceValueSetId = createChunkedDataset(
+        config_.forceGroupId, std::vector<hsize_t>{1, atoms.size(), 3}, "value", H5T_NATIVE_DOUBLE);
+
+    config_.massGroupId = createGroup(config_.particleSubGroupId, "mass");
+    config_.massStepSetId = createChunkedDataset(
+        config_.massGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.massTimeSetId = createChunkedDataset(
+        config_.massGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.massValueSetId = createChunkedDataset(
+        config_.massGroupId, std::vector<hsize_t>{1, atoms.size(), 1}, "value", H5T_NATIVE_DOUBLE);
+
+    config_.posGroupId = createGroup(config_.particleSubGroupId, "position");
+    config_.posStepSetId =
+        createChunkedDataset(config_.posGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.posTimeSetId = createChunkedDataset(
+        config_.posGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.posValueSetId = createChunkedDataset(
+        config_.posGroupId, std::vector<hsize_t>{1, atoms.size(), 3}, "value", H5T_NATIVE_DOUBLE);
+
+    config_.relativeMassGroupId = createGroup(config_.particleSubGroupId, "relativeMass");
+    config_.relativeMassStepSetId = createChunkedDataset(
+        config_.relativeMassGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.relativeMassTimeSetId = createChunkedDataset(
+        config_.relativeMassGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.relativeMassValueSetId = createChunkedDataset(config_.relativeMassGroupId,
+                                                          std::vector<hsize_t>{1, atoms.size(), 1},
+                                                          "value",
+                                                          H5T_NATIVE_DOUBLE);
+
+    config_.typeGroupId = createGroup(config_.particleSubGroupId, "type");
+    config_.typeStepSetId = createChunkedDataset(
+        config_.typeGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.typeTimeSetId = createChunkedDataset(
+        config_.typeGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.typeValueSetId = createChunkedDataset(
+        config_.typeGroupId, std::vector<hsize_t>{1, atoms.size(), 1}, "value", H5T_NATIVE_INT64);
+
+    config_.velGroupId = createGroup(config_.particleSubGroupId, "velocity");
+    config_.velStepSetId =
+        createChunkedDataset(config_.velGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.velTimeSetId = createChunkedDataset(
+        config_.velGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.velValueSetId = createChunkedDataset(
+        config_.velGroupId, std::vector<hsize_t>{1, atoms.size(), 3}, "value", H5T_NATIVE_DOUBLE);
+}
+
+hid_t DumpH5MDParallelImpl::createFile(const std::string& filename, const hid_t& propertyList) const
+{
+    auto fileId = CHECK_HDF5(H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, propertyList));
+    return fileId;
+}
+
+void DumpH5MDParallelImpl::closeFile(const hid_t& fileId) const { CHECK_HDF5(H5Fclose(fileId)); }
+
+hid_t DumpH5MDParallelImpl::createGroup(const hid_t& parentElementId,
+                                        const std::string& groupName) const
+{
+    auto groupId = CHECK_HDF5(
+        H5Gcreate(parentElementId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT));
+    return groupId;
+}
+
+void DumpH5MDParallelImpl::closeGroup(const hid_t& groupId) const { CHECK_HDF5(H5Gclose(groupId)); }
+
+void DumpH5MDParallelImpl::close() const
+{
+    closeDataset(config_.velValueSetId);
+    closeDataset(config_.velTimeSetId);
+    closeDataset(config_.velStepSetId);
+    closeGroup(config_.velGroupId);
+    closeDataset(config_.typeValueSetId);
+    closeDataset(config_.typeTimeSetId);
+    closeDataset(config_.typeStepSetId);
+    closeGroup(config_.typeGroupId);
+    closeDataset(config_.relativeMassValueSetId);
+    closeDataset(config_.relativeMassTimeSetId);
+    closeDataset(config_.relativeMassStepSetId);
+    closeGroup(config_.relativeMassGroupId);
+    closeDataset(config_.posValueSetId);
+    closeDataset(config_.posTimeSetId);
+    closeDataset(config_.posStepSetId);
+    closeGroup(config_.posGroupId);
+    closeDataset(config_.massStepSetId);
+    closeDataset(config_.massTimeSetId);
+    closeDataset(config_.massValueSetId);
+    closeGroup(config_.massGroupId);
+    closeDataset(config_.chargesValueSetId);
+    closeDataset(config_.chargesTimeSetId);
+    closeDataset(config_.chargesStepSetId);
+    closeDataset(config_.forceValueSetId);
+    closeDataset(config_.forceTimeSetId);
+    closeDataset(config_.forceStepSetId);
+    closeGroup(config_.forceGroupId);
+    closeGroup(config_.chargesGroupId);
+    closeDataset(config_.edgesValueSetId);
+    closeDataset(config_.edgesTimeSetId);
+    closeDataset(config_.edgesStepSetId);
+    closeGroup(config_.edgesGroupId);
+    closeGroup(config_.boxGroupId);
+    closeGroup(config_.particleSubGroupId);
+    closeGroup(config_.particleGroupId);
+    closeFile(config_.fileId);
+}
+
+void DumpH5MDParallelImpl::openBox(const data::Subdomain& subdomain) const
+{
+    config_.boxGroupId = createGroup(config_.particleSubGroupId, "box");
+
+    std::vector<int> dims = {3};
+    CHECK_HDF5(H5LTset_attribute_int(
+        config_.particleSubGroupId, "box", "dimension", dims.data(), dims.size()));
+    CHECK_HDF5(H5LTset_attribute_double(config_.particleSubGroupId,
+                                        "box",
+                                        "minCorner",
+                                        subdomain.minCorner.data(),
+                                        subdomain.minCorner.size()));
+    CHECK_HDF5(H5LTset_attribute_double(config_.particleSubGroupId,
+                                        "box",
+                                        "maxCorner",
+                                        subdomain.maxCorner.data(),
+                                        subdomain.maxCorner.size()));
+    CHECK_HDF5(H5LTset_attribute_double(config_.particleSubGroupId,
+                                        "box",
+                                        "ghostLayerThickness",
+                                        subdomain.ghostLayerThickness.data(),
+                                        subdomain.ghostLayerThickness.size()));
+
+    auto boundaryType = H5Tcopy(H5T_C_S1);
+    CHECK_HDF5(H5Tset_size(boundaryType, 8));
+    CHECK_HDF5(H5Tset_strpad(boundaryType, H5T_STR_NULLPAD));
+    std::vector<hsize_t> boundaryDims = {3};
+    auto space = H5Screate_simple(int_c(boundaryDims.size()), boundaryDims.data(), nullptr);
+    auto att =
+        H5Acreate(config_.boxGroupId, "boundary", boundaryType, space, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK_HDF5(H5Awrite(att, boundaryType, "periodicperiodicperiodic"));
+    CHECK_HDF5(H5Aclose(att));
+    CHECK_HDF5(H5Sclose(space));
+    CHECK_HDF5(H5Tclose(boundaryType));
+
+    config_.edgesGroupId = createGroup(config_.boxGroupId, "edges");
+    config_.edgesStepSetId = createChunkedDataset(
+        config_.edgesGroupId, std::vector<hsize_t>{1}, "step", H5T_NATIVE_INT64);
+    config_.edgesTimeSetId = createChunkedDataset(
+        config_.edgesGroupId, std::vector<hsize_t>{1}, "time", H5T_NATIVE_DOUBLE);
+    config_.edgesValueSetId = createChunkedDataset(
+        config_.edgesGroupId, std::vector<hsize_t>{1, 3}, "value", H5T_NATIVE_DOUBLE);
+}
+
+hid_t DumpH5MDParallelImpl::createChunkedDataset(const hid_t& groupId,
+                                                 const std::vector<hsize_t>& dims,
+                                                 const std::string& name,
+                                                 const hid_t& dtype) const
+{
+    std::vector<hsize_t> max_dims = dims;
+    max_dims[0] = H5S_UNLIMITED;
+    hid_t fileSpace = H5Screate_simple(int_c(dims.size()), dims.data(), max_dims.data());
+
+    hid_t plist = H5Pcreate(H5P_DATASET_CREATE);
+    H5Pset_layout(plist, H5D_CHUNKED);
+
+    H5Pset_chunk(plist, int_c(dims.size()), dims.data());
+
+    auto datasetId =
+        H5Dcreate(groupId, name.c_str(), dtype, fileSpace, H5P_DEFAULT, plist, H5P_DEFAULT);
+
+    H5Pclose(plist);
+    H5Sclose(fileSpace);
+
+    return datasetId;
+}
+
+void DumpH5MDParallelImpl::closeDataset(const hid_t& datasetId) const { H5Dclose(datasetId); }
+
+void DumpH5MDParallelImpl::dumpStep(const data::Subdomain& subdomain,
+                                    const data::Atoms& atoms,
+                                    const idx_t step,
+                                    const real_t dt)
+{
+    data::HostAtoms h_atoms(atoms);  // NOLINT
+
+    updateCache(h_atoms);
+
+    appendEdges(step, dt, subdomain);
+    appendCharges(step, dt, h_atoms);
+    appendForces(step, dt, h_atoms);
+    appendMasses(step, dt, h_atoms);
+    appendPositions(step, dt, h_atoms);
+    appendRelativeMasses(step, dt, h_atoms);
+    appendTypes(step, dt, h_atoms);
+    appendVelocities(step, dt, h_atoms);
+    config_.saveCount += 1;
+}
+
+void DumpH5MDParallelImpl::appendEdges(const idx_t& step,
+                                       const real_t& dt,
+                                       const data::Subdomain& subdomain) const
+{
+    appendData(config_.edgesStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.edgesTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    appendData(
+        config_.edgesValueSetId,
+        std::vector<real_t>{subdomain.diameter[0], subdomain.diameter[1], subdomain.diameter[2]},
+        std::vector<hsize_t>{1, 3});
+}
+
+void DumpH5MDParallelImpl::appendCharges(const idx_t& step,
+                                         const real_t& dt,
+                                         const data::HostAtoms& atoms) const
+{
+    appendData(config_.chargesStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.chargesTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 1;
+    std::vector<real_t> charges;
+    charges.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < numLocalParticles; ++idx)
+    {
+        charges.emplace_back(atoms.getCharge()(idx));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(charges.size()), numLocalParticles * dimensions);
+    appendDataParallel(
+        config_.chargesValueSetId, charges, std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+void DumpH5MDParallelImpl::appendForces(const idx_t& step,
+                                        const real_t& dt,
+                                        const data::HostAtoms& atoms) const
+{
+    appendData(config_.forceStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.forceTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 3;
+    std::vector<real_t> positions;
+    positions.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < atoms.numLocalAtoms; ++idx)
+    {
+        positions.emplace_back(atoms.getForce()(idx, 0));
+        positions.emplace_back(atoms.getForce()(idx, 1));
+        positions.emplace_back(atoms.getForce()(idx, 2));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(positions.size()), numLocalParticles * dimensions);
+    appendDataParallel(
+        config_.forceValueSetId, positions, std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+void DumpH5MDParallelImpl::appendMasses(const idx_t& step,
+                                        const real_t& dt,
+                                        const data::HostAtoms& atoms) const
+{
+    appendData(config_.massStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.massTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 1;
+    std::vector<real_t> masses;
+    masses.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < numLocalParticles; ++idx)
+    {
+        masses.emplace_back(atoms.getMass()(idx));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(masses.size()), numLocalParticles * dimensions);
+    appendDataParallel(
+        config_.massValueSetId, masses, std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+void DumpH5MDParallelImpl::appendPositions(const idx_t& step,
+                                           const real_t& dt,
+                                           const data::HostAtoms& atoms) const
+{
+    appendData(config_.posStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.posTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 3;
+    std::vector<real_t> positions;
+    positions.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < atoms.numLocalAtoms; ++idx)
+    {
+        positions.emplace_back(atoms.getPos()(idx, 0));
+        positions.emplace_back(atoms.getPos()(idx, 1));
+        positions.emplace_back(atoms.getPos()(idx, 2));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(positions.size()), numLocalParticles * dimensions);
+    appendDataParallel(
+        config_.posValueSetId, positions, std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+void DumpH5MDParallelImpl::appendRelativeMasses(const idx_t& step,
+                                                const real_t& dt,
+                                                const data::HostAtoms& atoms) const
+{
+    appendData(config_.relativeMassStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(config_.relativeMassTimeSetId,
+               std::vector<real_t>{real_c(step) * dt},
+               std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 1;
+    std::vector<real_t> relativeMasses;
+    relativeMasses.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < numLocalParticles; ++idx)
+    {
+        relativeMasses.emplace_back(atoms.getRelativeMass()(idx));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(relativeMasses.size()), numLocalParticles * dimensions);
+    appendDataParallel(config_.relativeMassValueSetId,
+                       relativeMasses,
+                       std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+void DumpH5MDParallelImpl::appendTypes(const idx_t& step,
+                                       const real_t& dt,
+                                       const data::HostAtoms& atoms) const
+{
+    appendData(config_.typeStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.typeTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 1;
+    std::vector<idx_t> types;
+    types.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < numLocalParticles; ++idx)
+    {
+        types.emplace_back(atoms.getType()(idx));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(types.size()), numLocalParticles * dimensions);
+    appendDataParallel(
+        config_.typeValueSetId, types, std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+void DumpH5MDParallelImpl::appendVelocities(const idx_t& step,
+                                            const real_t& dt,
+                                            const data::HostAtoms& atoms) const
+{
+    appendData(config_.velStepSetId, std::vector<idx_t>{step}, std::vector<hsize_t>{1});
+    appendData(
+        config_.velTimeSetId, std::vector<real_t>{real_c(step) * dt}, std::vector<hsize_t>{1});
+    hsize_t numberLocalAtoms = atoms.numLocalAtoms;
+    constexpr int64_t dimensions = 3;
+    std::vector<real_t> velocities;
+    velocities.reserve(numLocalParticles * dimensions);
+    for (idx_t idx = 0; idx < atoms.numLocalAtoms; ++idx)
+    {
+        velocities.emplace_back(atoms.getVel()(idx, 0));
+        velocities.emplace_back(atoms.getVel()(idx, 1));
+        velocities.emplace_back(atoms.getVel()(idx, 2));
+    }
+    MRMD_HOST_CHECK_EQUAL(int64_c(velocities.size()), numLocalParticles * dimensions);
+    appendDataParallel(
+        config_.velValueSetId, velocities, std::vector<hsize_t>{1, numberLocalAtoms, dimensions});
+}
+
+template <typename T>
+void DumpH5MDParallelImpl::appendDataParallel(const hid_t datasetId,
+                                              const std::vector<T>& data,
+                                              const std::vector<hsize_t>& dims) const
+{
+    std::vector<hsize_t> newSize = dims;
+    newSize[0] = config_.saveCount + 1;
+    H5Dset_extent(datasetId, newSize.data());
+
+    const auto fileSpace = H5Dget_space(datasetId);
+
+    std::vector<hsize_t> offset(dims.size(), 0);
+    offset[0] = config_.saveCount;
+    offset[1] = particleOffset;
+    std::vector<hsize_t> stride(dims.size(), 1);
+    std::vector<hsize_t> count(dims.size(), 1);
+
+    CHECK_HDF5(H5Sselect_hyperslab(
+        fileSpace, H5S_SELECT_SET, offset.data(), stride.data(), count.data(), dims.data()));
+
+    std::vector<hsize_t> localOffset(dims.size(), 0);
+    const hid_t memorySpace = H5Screate_simple(int_c(dims.size()), dims.data(), nullptr);
+    CHECK_HDF5(H5Sselect_hyperslab(
+        memorySpace, H5S_SELECT_SET, localOffset.data(), stride.data(), count.data(), dims.data()));
+
+    auto propertyList = CHECK_HDF5(H5Pcreate(H5P_DATASET_XFER));
+    CHECK_HDF5(H5Pset_dxpl_mpio(propertyList, H5FD_MPIO_COLLECTIVE));
+    CHECK_HDF5(
+        H5Dwrite(datasetId, typeToHDF5<T>(), memorySpace, fileSpace, propertyList, data.data()));
+
+    CHECK_HDF5(H5Pclose(propertyList));
+    H5Sclose(fileSpace);
+    H5Sclose(memorySpace);
+}
+
+template <typename T>
+void DumpH5MDParallelImpl::appendData(const hid_t datasetId,
+                                      const std::vector<T>& data,
+                                      const std::vector<hsize_t>& dims) const
+{
+    const hid_t memorySpace = H5Screate_simple(int_c(dims.size()), dims.data(), nullptr);
+
+    std::vector<hsize_t> newSize = dims;
+    newSize[0] = config_.saveCount + 1;
+    H5Dset_extent(datasetId, newSize.data());
+
+    const auto fileSpace = H5Dget_space(datasetId);
+
+    std::vector<hsize_t> start(dims.size(), 0);
+    start[0] = config_.saveCount;
+    std::vector<hsize_t> count = dims;
+    count[0] = 1;
+    H5Sselect_hyperslab(fileSpace, H5S_SELECT_SET, start.data(), nullptr, count.data(), nullptr);
+
+    H5Dwrite(datasetId, typeToHDF5<T>(), memorySpace, fileSpace, H5P_DEFAULT, data.data());
+
+    H5Sclose(fileSpace);
+    H5Sclose(memorySpace);
+}
+
 template <typename T>
 void DumpH5MDParallelImpl::writeParallel(hid_t fileId,
                                          const std::string& name,
@@ -144,7 +630,7 @@ void DumpH5MDParallelImpl::writeHeader(hid_t fileId) const
 
 void DumpH5MDParallelImpl::writeBox(hid_t fileId, const data::Subdomain& subdomain) const
 {
-    std::string groupName = "/particles/" + config_.particleGroupName + "/box";
+    std::string groupName = "/particles/" + config_.particleSubGroupName + "/box";
     auto group =
         CHECK_HDF5(H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT));
     std::vector<int> dims = {3};
@@ -208,7 +694,7 @@ void DumpH5MDParallelImpl::writePos(hid_t fileId, const data::HostAtoms& atoms)
     using Datatype = real_t;
     constexpr int64_t dimensions = 3;  ///< dimensions of the property
 
-    std::string groupName = "/particles/" + config_.particleGroupName + "/" + config_.posDataset;
+    std::string groupName = "/particles/" + config_.particleSubGroupName + "/" + config_.posDataset;
     auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -244,7 +730,7 @@ void DumpH5MDParallelImpl::writeVel(hid_t fileId, const data::HostAtoms& atoms)
     using Datatype = real_t;
     constexpr int64_t dimensions = 3;  ///< dimensions of the property
 
-    std::string groupName = "/particles/" + config_.particleGroupName + "/" + config_.velDataset;
+    std::string groupName = "/particles/" + config_.particleSubGroupName + "/" + config_.velDataset;
     auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -280,7 +766,8 @@ void DumpH5MDParallelImpl::writeForce(hid_t fileId, const data::HostAtoms& atoms
     using Datatype = real_t;
     constexpr int64_t dimensions = 3;  ///< dimensions of the property
 
-    std::string groupName = "/particles/" + config_.particleGroupName + "/" + config_.forceDataset;
+    std::string groupName =
+        "/particles/" + config_.particleSubGroupName + "/" + config_.forceDataset;
     auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -316,7 +803,8 @@ void DumpH5MDParallelImpl::writeType(hid_t fileId, const data::HostAtoms& atoms)
     using Datatype = idx_t;
     constexpr int64_t dimensions = 1;  ///< dimensions of the property
 
-    std::string groupName = "/particles/" + config_.particleGroupName + "/" + config_.typeDataset;
+    std::string groupName =
+        "/particles/" + config_.particleSubGroupName + "/" + config_.typeDataset;
    auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -350,7 +838,8 @@ void DumpH5MDParallelImpl::writeMass(hid_t fileId, const data::HostAtoms& atoms)
     using Datatype = real_t;
     constexpr int64_t dimensions = 1;  ///< dimensions of the property
 
-    std::string groupName = "/particles/" + config_.particleGroupName + "/" + config_.massDataset;
+    std::string groupName =
+        "/particles/" + config_.particleSubGroupName + "/" + config_.massDataset;
     auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -384,7 +873,8 @@ void DumpH5MDParallelImpl::writeCharge(hid_t fileId, const data::HostAtoms& atom
     using Datatype = real_t;
     constexpr int64_t dimensions = 1;  ///< dimensions of the property
 
-    std::string groupName = "/particles/" + config_.particleGroupName + "/" + config_.chargeDataset;
+    std::string groupName =
+        "/particles/" + config_.particleSubGroupName + "/" + config_.chargeDataset;
     auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -419,7 +909,7 @@ void DumpH5MDParallelImpl::writeRelativeMass(hid_t fileId, const data::HostAtoms
     constexpr int64_t dimensions = 1;  ///< dimensions of the property
 
     std::string groupName =
-        "/particles/" + config_.particleGroupName + "/" + config_.relativeMassDataset;
+        "/particles/" + config_.particleSubGroupName + "/" + config_.relativeMassDataset;
     auto group = H5Gcreate(fileId, groupName.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
 
     std::vector<Datatype> data;
@@ -479,9 +969,9 @@ void DumpH5MDParallelImpl::dump(const std::string& filename,
 
     auto group1 =
         CHECK_HDF5(H5Gcreate(file_id, "/particles", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT));
-    std::string particleGroup = "/particles/" + config_.particleGroupName;
+    std::string particleSubGroup = "/particles/" + config_.particleSubGroupName;
     auto group2 = CHECK_HDF5(
-        H5Gcreate(file_id, particleGroup.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT));
+        H5Gcreate(file_id, particleSubGroup.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT));
 
     writeHeader(file_id);
     writeBox(file_id, subdomain);
@@ -498,9 +988,31 @@ void DumpH5MDParallelImpl::dump(const std::string& filename,
 
     CHECK_HDF5(H5Fclose(file_id));
 }
-
 }  // namespace impl
 
+void DumpH5MDParallel::open(const std::string& filename,
+                            const data::Subdomain& subdomain,
+                            const data::Atoms& atoms)
+{
+    impl::DumpH5MDParallelImpl helper(*this);
+    helper.open(filename, subdomain, atoms);
+}
+
+void DumpH5MDParallel::dumpStep(const data::Subdomain& subdomain,
+                                const data::Atoms& atoms,
+                                const idx_t& step,
+                                const real_t& dt)
+{
+    impl::DumpH5MDParallelImpl helper(*this);
+    helper.dumpStep(subdomain, atoms, step, dt);
+}
+
+void DumpH5MDParallel::close()
+{
+    impl::DumpH5MDParallelImpl helper(*this);
+    helper.close();
+}
+
 void DumpH5MDParallel::dump(const std::string& filename,
                             const data::Subdomain& subdomain,
                             const data::Atoms& atoms)
@@ -509,6 +1021,28 @@ void DumpH5MDParallel::dump(const std::string& filename,
     helper.dump(filename, subdomain, atoms);
 }
 #else
+void DumpH5MDParallel::open(const std::string& /*filename*/,
+                            const data::Subdomain& /*subdomain*/,
+                            const data::Atoms& /*atoms*/)
+{
+    MRMD_HOST_CHECK(false, "HDF5 Support not available!");
+    exit(EXIT_FAILURE);
+}
+
+void DumpH5MDParallel::close()
+{
+    MRMD_HOST_CHECK(false, "HDF5 Support not available!");
+    exit(EXIT_FAILURE);
+}
+void DumpH5MDParallel::dumpStep(const data::Subdomain& /*subdomain*/,
+                                const data::Atoms& /*atoms*/,
+                                const idx_t& /*step*/,
+                                const real_t& /*dt*/)
+{
+    MRMD_HOST_CHECK(false, "HDF5 Support not available!");
+    exit(EXIT_FAILURE);
+}
+
 void DumpH5MDParallel::dump(const std::string& /*filename*/,
                             const data::Subdomain& /*subdomain*/,
                             const data::Atoms& /*atoms*/)
diff --git a/mrmd/io/DumpH5MDParallel.hpp b/mrmd/io/DumpH5MDParallel.hpp
index 64d54fcf..3cc16112 100644
--- a/mrmd/io/DumpH5MDParallel.hpp
+++ b/mrmd/io/DumpH5MDParallel.hpp
@@ -23,15 +23,26 @@
 
 namespace mrmd::io
 {
+
 class DumpH5MDParallel
 {
 public:
     DumpH5MDParallel(const std::shared_ptr<data::MPIInfo>& mpiInfoArg,
                      const std::string& authorArg,
-                     const std::string& particleGroupNameArg = "atoms")
-        : mpiInfo(mpiInfoArg), author(authorArg), particleGroupName(particleGroupNameArg)
+                     const std::string& particleSubGroupNameArg = "atoms")
+        : mpiInfo(mpiInfoArg), author(authorArg), particleSubGroupName(particleSubGroupNameArg)
     {
     }
 
+    void open(const std::string& filename,
+              const data::Subdomain& subdomain,
+              const data::Atoms& atoms);
+
+    void dumpStep(const data::Subdomain& subdomain,
+                  const data::Atoms& atoms,
+                  const idx_t& step,
+                  const real_t& dt);
+
+    void close();
     void dump(const std::string& filename,
               const data::Subdomain& subdomain,
@@ -56,7 +67,45 @@ class DumpH5MDParallel
 
     std::shared_ptr<data::MPIInfo> mpiInfo;
     std::string author = "xxx";
-    std::string particleGroupName = "atoms";
-};
+    std::string particleSubGroupName = "atoms";
+    int64_t fileId;
+    int64_t particleGroupId;
+    int64_t particleSubGroupId;
+    int64_t boxGroupId;
+    int64_t edgesGroupId;
+    int64_t edgesStepSetId;
+    int64_t edgesTimeSetId;
+    int64_t edgesValueSetId;
+    int64_t chargesGroupId;
+    int64_t chargesStepSetId;
+    int64_t chargesTimeSetId;
+    int64_t chargesValueSetId;
+    int64_t forceGroupId;
+    int64_t forceStepSetId;
+    int64_t forceTimeSetId;
+    int64_t forceValueSetId;
+    int64_t massGroupId;
+    int64_t massStepSetId;
+    int64_t massTimeSetId;
+    int64_t massValueSetId;
+    int64_t posGroupId;
+    int64_t posStepSetId;
+    int64_t posTimeSetId;
+    int64_t posValueSetId;
+    int64_t relativeMassGroupId;
+    int64_t relativeMassStepSetId;
+    int64_t relativeMassTimeSetId;
+    int64_t relativeMassValueSetId;
+    int64_t typeGroupId;
+    int64_t typeStepSetId;
+    int64_t typeTimeSetId;
+    int64_t typeValueSetId;
+    int64_t velGroupId;
+    int64_t velStepSetId;
+    int64_t velTimeSetId;
+    int64_t velValueSetId;
+
+    uint64_t saveCount = 0;
+};
 
 }  // namespace mrmd::io
\ No newline at end of file
diff --git a/mrmd/io/H5MD.test.cpp b/mrmd/io/H5MD.test.cpp
index 0a683280..0c918993 100644
--- a/mrmd/io/H5MD.test.cpp
+++ b/mrmd/io/H5MD.test.cpp
@@ -66,21 +66,42 @@ data::Atoms getAtoms(const std::shared_ptr<data::MPIInfo>& mpiInfo)
     return atoms;
 }
 
-TEST(H5MD, dump)
+
+void shuffleAtoms(data::Subdomain& subdomain, data::Atoms& atoms, const idx_t& step)
 {
-    auto mpiInfo = std::make_shared<data::MPIInfo>(MPI_COMM_WORLD);
+    auto RNG = Kokkos::Random_XorShift1024_Pool<>(1234 * step);
 
-    auto subdomain1 = data::Subdomain({1_r, 2_r, 3_r}, {4_r, 6_r, 8_r}, 0.5_r);
-    auto atoms1 = getAtoms(mpiInfo);
+    auto pos = atoms.getPos();
+    auto vel = atoms.getVel();
+    auto force = atoms.getForce();
 
-    auto dump = DumpH5MDParallel(mpiInfo, "XzzX");
-    dump.dump("dummy.h5md", subdomain1, atoms1);
+    auto policy = Kokkos::RangePolicy<>(0, atoms.numLocalAtoms);
+    auto kernel = KOKKOS_LAMBDA(const idx_t idx)
+    {
+        auto randGen = RNG.get_state();
+        pos(idx, 0) = randGen.drand() * subdomain.diameter[0] + subdomain.minCorner[0];
+        pos(idx, 1) = randGen.drand() * subdomain.diameter[1] + subdomain.minCorner[1];
+        pos(idx, 2) = randGen.drand() * subdomain.diameter[2] + subdomain.minCorner[2];
 
-    auto subdomain2 = data::Subdomain();
-    auto atoms2 = data::Atoms(0);
-    auto restore = RestoreH5MDParallel(mpiInfo);
-    restore.restore("dummy.h5md", subdomain2, atoms2);
+        vel(idx, 0) = (randGen.drand() - 0.5_r);
+        vel(idx, 1) = (randGen.drand() - 0.5_r);
+        vel(idx, 2) = (randGen.drand() - 0.5_r);
+
+        force(idx, 0) = (randGen.drand() - 0.5_r);
+        force(idx, 1) = (randGen.drand() - 0.5_r);
+        force(idx, 2) = (randGen.drand() - 0.5_r);
+        RNG.free_state(randGen);
+    };
+    Kokkos::parallel_for("shuffle-atoms", policy, kernel);
+    Kokkos::fence();
+}
+
+void compareSystems(const data::Subdomain& subdomain1,
+                    const data::Atoms& atoms1,
+                    const data::Subdomain& subdomain2,
+                    const data::Atoms& atoms2)
+{
     EXPECT_FLOAT_EQ(subdomain1.ghostLayerThickness[0], subdomain2.ghostLayerThickness[0]);
     EXPECT_FLOAT_EQ(subdomain1.ghostLayerThickness[1], subdomain2.ghostLayerThickness[1]);
     EXPECT_FLOAT_EQ(subdomain1.ghostLayerThickness[2], subdomain2.ghostLayerThickness[2]);
@@ -117,5 +138,79 @@
         EXPECT_FLOAT_EQ(h_atoms1.getRelativeMass()(idx), h_atoms2.getRelativeMass()(idx));
     }
 }
+
+TEST(H5MD, dump)
+{
+    auto mpiInfo = std::make_shared<data::MPIInfo>(MPI_COMM_WORLD);
+
+    auto subdomain1 = data::Subdomain({1_r, 2_r, 3_r}, {4_r, 6_r, 8_r}, 0.5_r);
+    auto atoms1 = getAtoms(mpiInfo);
+
+    auto dump = DumpH5MDParallel(mpiInfo, "XzzX");
+    dump.dump("dummy.h5md", subdomain1, atoms1);
+
+    auto subdomain2 = data::Subdomain();
+    auto atoms2 = data::Atoms(0);
+    auto restore = RestoreH5MDParallel(mpiInfo);
+    restore.restore("dummy.h5md", subdomain2, atoms2);
+
+    compareSystems(subdomain1, atoms1, subdomain2, atoms2);
+}
+
+TEST(H5MD, dumpMultipleSteps)
+{
+    auto mpiInfo = std::make_shared<data::MPIInfo>(MPI_COMM_WORLD);
+
+    auto subdomain1 = data::Subdomain({1_r, 2_r, 3_r}, {4_r, 6_r, 8_r}, 0.5_r);
+    auto atoms1 = getAtoms(mpiInfo);
+    real_t dt = 0.002_r;
+
+    auto dump = DumpH5MDParallel(mpiInfo, "J-Hizzle");
+
+    auto subdomain2 = data::Subdomain();
+    auto atoms2 = data::Atoms(0);
+    auto restore = RestoreH5MDParallel(mpiInfo);
+
+    dump.open("dummyMultipleSteps.h5md", subdomain1, atoms1);
+    dump.dumpStep(subdomain1, atoms1, 0, dt);
+    dump.close();
+
+    restore.restore("dummyMultipleSteps.h5md", subdomain2, atoms2, 0);
+    compareSystems(subdomain1, atoms1, subdomain2, atoms2);
+}
+
+TEST(H5MD, dumpConsistency)
+{
+    auto mpiInfo = std::make_shared<data::MPIInfo>(MPI_COMM_WORLD);
+
+    auto subdomain0 = data::Subdomain({1_r, 2_r, 3_r}, {4_r, 6_r, 8_r}, 0.5_r);
+    auto atoms0 = getAtoms(mpiInfo);
+    real_t dt = 0.002_r;
+
+    auto dump = DumpH5MDParallel(mpiInfo, "J-Hizzle");
+
+    auto subdomain1 = data::Subdomain();
+    auto atoms1 = data::Atoms(0);
+    auto subdomain2 = data::Subdomain();
+    auto atoms2 = data::Atoms(0);
+    auto restore = RestoreH5MDParallel(mpiInfo);
+
+    dump.open("dummyConsistencyMultipleSteps.h5md", subdomain0, atoms0);
+
+    for (idx_t step = 0; step < 5; ++step)
+    {
+        shuffleAtoms(subdomain0, atoms0, step);
+        dump.dumpStep(subdomain0, atoms0, step, dt);
+    }
+
+    dump.close();
+
+    dump.dump("dummyConsistencyFinalStep.h5md", subdomain0, atoms0);
+
+    restore.restore("dummyConsistencyMultipleSteps.h5md", subdomain1, atoms1, 4);
+    restore.restore("dummyConsistencyFinalStep.h5md", subdomain2, atoms2);
+    compareSystems(subdomain1, atoms1, subdomain2, atoms2);
+}
+
 } // namespace io
 } // namespace mrmd
\ No newline at end of file
diff --git a/mrmd/io/RestoreH5MDParallel.cpp b/mrmd/io/RestoreH5MDParallel.cpp
index 333ce8a3..25153244 100644
--- a/mrmd/io/RestoreH5MDParallel.cpp
+++ b/mrmd/io/RestoreH5MDParallel.cpp
@@ -26,7 +26,8 @@ namespace mrmd::io
 template <typename T>
 void RestoreH5MDParallel::readParallel(hid_t fileId,
                                        const std::string& dataset,
-                                       std::vector<T>& data)
+                                       std::vector<T>& data,
+                                       const idx_t& saveCount)
 {
     auto dset = CHECK_HDF5(H5Dopen(fileId, dataset.c_str(), H5P_DEFAULT));
     auto dspace = CHECK_HDF5(H5Dget_space(dset));
@@ -53,6 +54,7 @@ void RestoreH5MDParallel::readParallel(hid_t fileId,
 
     // set up local part of the input file
     std::vector<hsize_t> offset(globalDims.size(), 0);
+    offset[0] = saveCount;
     offset[1] = localOffset;
     std::vector<hsize_t> stride(globalDims.size(), 1);
     std::vector<hsize_t> count(globalDims.size(), 1);
@@ -88,7 +90,8 @@ void RestoreH5MDParallel::readParallel(hid_t fileId,
 
 void RestoreH5MDParallel::restore(const std::string& filename,
                                   data::Subdomain& subdomain,
-                                  data::Atoms& atoms)
+                                  data::Atoms& atoms,
+                                  const idx_t& saveCount)
 {
     MPI_Info info = MPI_INFO_NULL;
 
@@ -97,7 +100,7 @@ void RestoreH5MDParallel::restore(const std::string& filename,
 
     auto fileId = CHECK_HDF5(H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist));
 
-    std::string groupName = "/particles/" + particleGroupName_ + "/box";
+    std::string groupName = "/particles/" + particleSubGroupName_ + "/box";
     CHECK_HDF5(H5LTget_attribute_double(
         fileId, groupName.c_str(), "minCorner", subdomain.minCorner.data()));
     CHECK_HDF5(H5LTget_attribute_double(
@@ -109,49 +112,64 @@ void RestoreH5MDParallel::restore(const std::string& filename,
 
     std::vector<real_t> pos;
     if (restorePos)
     {
-        readParallel(fileId, "/particles/" + particleGroupName_ + "/" + posDataset + "/value", pos);
+        readParallel(fileId,
+                     "/particles/" + particleSubGroupName_ + "/" + posDataset + "/value",
+                     pos,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 3, pos.size());
     }
     std::vector<real_t> vel;
     if (restoreVel)
     {
-        readParallel(fileId, "/particles/" + particleGroupName_ + "/" + velDataset + "/value", vel);
+        readParallel(fileId,
+                     "/particles/" + particleSubGroupName_ + "/" + velDataset + "/value",
+                     vel,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 3, vel.size());
     }
     std::vector<real_t> force;
     if (restoreForce)
     {
-        readParallel(
-            fileId, "/particles/" + particleGroupName_ + "/" + forceDataset + "/value", force);
+        readParallel(fileId,
+                     "/particles/" + particleSubGroupName_ + "/" + forceDataset + "/value",
+                     force,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 3, force.size());
     }
     std::vector<idx_t> type;
     if (restoreType)
     {
-        readParallel(
-            fileId, "/particles/" + particleGroupName_ + "/" + typeDataset + "/value", type);
+        readParallel(fileId,
+                     "/particles/" + particleSubGroupName_ + "/" + typeDataset + "/value",
+                     type,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 1, type.size());
     }
     std::vector<real_t> mass;
     if (restoreMass)
     {
-        readParallel(
-            fileId, "/particles/" + particleGroupName_ + "/" + massDataset + "/value", mass);
+        readParallel(fileId,
+                     "/particles/" + particleSubGroupName_ + "/" + massDataset + "/value",
+                     mass,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 1, mass.size());
     }
     std::vector<real_t> charge;
     if (restoreCharge)
     {
-        readParallel(
-            fileId, "/particles/" + particleGroupName_ + "/" + chargeDataset + "/value", charge);
+        readParallel(fileId,
+                     "/particles/" + particleSubGroupName_ + "/" + chargeDataset + "/value",
+                     charge,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 1, charge.size());
     }
     std::vector<real_t> relativeMass;
     if (restoreRelativeMass)
     {
         readParallel(fileId,
-                     "/particles/" + particleGroupName_ + "/" + relativeMassDataset + "/value",
-                     relativeMass);
+                     "/particles/" + particleSubGroupName_ + "/" + relativeMassDataset + "/value",
+                     relativeMass,
+                     saveCount);
         MRMD_HOST_CHECK_EQUAL(pos.size() / 3 * 1, relativeMass.size());
     }
 
@@ -192,7 +210,8 @@
 template <typename T>
 void RestoreH5MDParallel::readParallel(hid_t /*fileId*/,
                                        const std::string& /*dataset*/,
-                                       std::vector<T>& /*data*/)
+                                       std::vector<T>& /*data*/,
+                                       const idx_t& /*saveCount*/)
 {
     MRMD_HOST_CHECK(false, "HDF5 support not available!");
     exit(EXIT_FAILURE);
@@ -200,7 +219,8 @@ void RestoreH5MDParallel::readParallel(hid_t /*fileId*/,
 
 void RestoreH5MDParallel::restore(const std::string& /*filename*/,
                                   data::Subdomain& /*subdomain*/,
-                                  data::Atoms& /*atoms*/)
+                                  data::Atoms& /*atoms*/,
+                                  const idx_t& /*step*/)
 {
     MRMD_HOST_CHECK(false, "HDF5 support not available!");
     exit(EXIT_FAILURE);
diff --git a/mrmd/io/RestoreH5MDParallel.hpp b/mrmd/io/RestoreH5MDParallel.hpp
index 45766ef1..88f2f194 100644
--- a/mrmd/io/RestoreH5MDParallel.hpp
+++ b/mrmd/io/RestoreH5MDParallel.hpp
@@ -30,12 +30,15 @@ class RestoreH5MDParallel
 {
 public:
     RestoreH5MDParallel(const std::shared_ptr<data::MPIInfo>& mpiInfo,
-                        const std::string& particleGroupName = "atoms")
-        : mpiInfo_(mpiInfo), particleGroupName_(particleGroupName)
+                        const std::string& particleSubGroupName = "atoms")
+        : mpiInfo_(mpiInfo), particleSubGroupName_(particleSubGroupName)
     {
     }
 
-    void restore(const std::string& filename, data::Subdomain& subdomain, data::Atoms& atoms);
+    void restore(const std::string& filename,
+                 data::Subdomain& subdomain,
+                 data::Atoms& atoms,
+                 const idx_t& saveCount = 0);
 
     bool restorePos = true;
     bool restoreVel = true;
@@ -55,10 +58,13 @@ class RestoreH5MDParallel
 
 private:
     template <typename T>
-    void readParallel(hid_t fileId, const std::string& dataset, std::vector<T>& data);
+    void readParallel(hid_t fileId,
+                      const std::string& dataset,
+                      std::vector<T>& data,
+                      const idx_t& saveCount);
 
     std::shared_ptr<data::MPIInfo> mpiInfo_;
-    std::string particleGroupName_;
+    std::string particleSubGroupName_;
 };
 
 } // namespace mrmd::io
\ No newline at end of file