diff --git a/pom.xml b/pom.xml
index cb29f816..f0bb703a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -94,6 +94,13 @@
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>3.8.0</version>
+ <scope>test</scope>
+ </dependency>
+
<dependency>
<groupId>javax</groupId>
<artifactId>javaee-api</artifactId>
diff --git a/src/main/config/run.properties.example b/src/main/config/run.properties.example
index 9879b7ac..85bc77b2 100644
--- a/src/main/config/run.properties.example
+++ b/src/main/config/run.properties.example
@@ -14,6 +14,8 @@ sizeCheckIntervalSeconds = 60
reader = db username root password password
!readOnly = true
maxIdsInQuery = 1000
+!allowRestoreFailures = true
+!missingFilesZipEntryName = path/to/FILENAME.txt
# Properties for archive storage
plugin.archive.class = org.icatproject.ids.storage.ArchiveFileStorage
diff --git a/src/main/java/org/icatproject/ids/FiniteStateMachine.java b/src/main/java/org/icatproject/ids/FiniteStateMachine.java
index 344e5a1f..a81bb981 100644
--- a/src/main/java/org/icatproject/ids/FiniteStateMachine.java
+++ b/src/main/java/org/icatproject/ids/FiniteStateMachine.java
@@ -624,10 +624,26 @@ public void recordFailure(Long id) {
}
}
- public void checkFailure(Long id) throws InternalException {
+ /**
+ * Check whether the Dataset/Datafile ID (depending on the StorageUnit set)
+ * is in the list of IDs that failed to restore. The behaviour then depends
+ * on whether the property allowRestoreFailures is set.
+ *
+ * @param id a Dataset or Datafile ID
+ * @return true if the ID is found in the list of failures and
+ * allowRestoreFailures is set, false if the ID is not found
+ * @throws InternalException if the ID is found in the list of failures
+ * and allowRestoreFailures is not set
+ */
+ public boolean checkFailure(Long id) throws InternalException {
if (failures.contains(id)) {
- throw new InternalException("Restore failed");
- }
- }
+ if (propertyHandler.getAllowRestoreFailures()) {
+ return true;
+ } else {
+ throw new InternalException("Restore failed");
+ }
+ }
+ return false;
+ }
}
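
To make the new contract concrete, a minimal caller-side sketch (illustrative only: the handleId method is hypothetical, while FiniteStateMachine and InternalException are the existing types in this codebase):

    // Hypothetical caller showing the three possible outcomes of checkFailure(Long)
    void handleId(FiniteStateMachine fsm, Long id) throws InternalException {
        if (fsm.checkFailure(id)) {
            // the id previously failed to restore and allowRestoreFailures is true:
            // record the failure for this id but keep processing the remaining ids
        } else {
            // the id is not in the failure list: proceed as before
        }
        // if the id failed and allowRestoreFailures is false, checkFailure throws
        // InternalException("Restore failed") instead of returning
    }
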
diff --git a/src/main/java/org/icatproject/ids/IdsBean.java b/src/main/java/org/icatproject/ids/IdsBean.java
index c9ada56b..95476b04 100644
--- a/src/main/java/org/icatproject/ids/IdsBean.java
+++ b/src/main/java/org/icatproject/ids/IdsBean.java
@@ -88,17 +88,22 @@ public class RunPrepDsCheck implements Callable<Void> {
private Collection<DsInfo> toCheck;
private Set<Long> emptyDatasets;
+ private PreparedStatus preparedStatus;
- public RunPrepDsCheck(Collection<DsInfo> toCheck, Set<Long> emptyDatasets) {
+ public RunPrepDsCheck(Collection<DsInfo> toCheck, Set<Long> emptyDatasets, PreparedStatus preparedStatus) {
this.toCheck = toCheck;
this.emptyDatasets = emptyDatasets;
+ this.preparedStatus = preparedStatus;
}
@Override
public Void call() throws Exception {
for (DsInfo dsInfo : toCheck) {
- fsm.checkFailure(dsInfo.getDsId());
- restoreIfOffline(dsInfo, emptyDatasets);
+ if (fsm.checkFailure(dsInfo.getDsId())) {
+ preparedStatus.failedRestores.add(dsInfo.getDsId());
+ } else {
+ restoreIfOffline(dsInfo, emptyDatasets);
+ }
}
return null;
}
@@ -108,17 +113,22 @@ public Void call() throws Exception {
public class RunPrepDfCheck implements Callable<Void> {
private SortedSet<DfInfoImpl> toCheck;
+ private PreparedStatus preparedStatus;
- public RunPrepDfCheck(SortedSet<DfInfoImpl> toCheck) {
+ public RunPrepDfCheck(SortedSet<DfInfoImpl> toCheck, PreparedStatus preparedStatus) {
this.toCheck = toCheck;
+ this.preparedStatus = preparedStatus;
}
@Override
public Void call() throws Exception {
for (DfInfoImpl dfInfo : toCheck) {
- fsm.checkFailure(dfInfo.getDfId());
- restoreIfOffline(dfInfo);
- }
+ if (fsm.checkFailure(dfInfo.getDfId())) {
+ preparedStatus.failedRestores.add(dfInfo.getDfId());
+ } else {
+ restoreIfOffline(dfInfo);
+ }
+ }
return null;
}
@@ -164,7 +174,7 @@ public Void call() throws Exception {
}
}
- private class SO implements StreamingOutput {
+ class SO implements StreamingOutput {
private long offset;
private boolean zip;
@@ -203,6 +213,7 @@ public void write(OutputStream output) throws IOException {
zos.setLevel(0); // Otherwise use default compression
}
+ List<String> missingFiles = new ArrayList<>();
for (DfInfoImpl dfInfo : dfInfos) {
logger.debug("Adding " + dfInfo + " to zip");
transfer = dfInfo;
@@ -210,20 +221,40 @@ public void write(OutputStream output) throws IOException {
String entryName = zipMapper.getFullEntryName(dsInfo, dfInfo);
InputStream stream = null;
try {
- zos.putNextEntry(new ZipEntry(entryName));
stream = mainStorage.get(dfInfo.getDfLocation(), dfInfo.getCreateId(), dfInfo.getModId());
+ zos.putNextEntry(new ZipEntry(entryName));
int length;
while ((length = stream.read(bytes)) >= 0) {
zos.write(bytes, 0, length);
}
+ zos.closeEntry();
} catch (ZipException e) {
logger.debug("Skipped duplicate");
+ } catch (IOException e) {
+ if (propertyHandler.getAllowRestoreFailures()) {
+ logger.warn("Skipping missing file in zip: {}", entryName);
+ missingFiles.add(entryName);
+ } else {
+ throw e;
+ }
}
- zos.closeEntry();
if (stream != null) {
stream.close();
}
}
+ if (propertyHandler.getAllowRestoreFailures() && !missingFiles.isEmpty()) {
+ // add a file to the zip file listing the missing files
+ StringBuilder sb = new StringBuilder();
+ sb.append("The following files were not found:").append("\n");
+ for (String filename : missingFiles) {
+ sb.append(filename).append("\n");
+ }
+ byte[] data = sb.toString().getBytes();
+ ZipEntry e = new ZipEntry(propertyHandler.getMissingFilesZipEntryName());
+ zos.putNextEntry(e);
+ zos.write(data, 0, data.length);
+ zos.closeEntry();
+ }
zos.close();
} else {
DfInfoImpl dfInfo = dfInfos.iterator().next();
@@ -506,6 +537,7 @@ class PreparedStatus {
public DfInfoImpl fromDfElement;
public Future<?> future;
public Long fromDsElement;
+ public Set<Long> failedRestores = new HashSet<>();
};
private Map<String, PreparedStatus> preparedStatusMap = new ConcurrentHashMap<>();
@@ -1367,43 +1399,7 @@ public String getStatus(String preparedId, String ip)
throw new InternalException(e.getClass() + " " + e.getMessage());
}
- final Set<DfInfoImpl> dfInfos = prepared.dfInfos;
- final Map<Long, DsInfo> dsInfos = prepared.dsInfos;
- Set<Long> emptyDatasets = prepared.emptyDatasets;
-
- Status status = Status.ONLINE;
-
- if (storageUnit == StorageUnit.DATASET) {
- Set<DsInfo> restoring = fsm.getDsRestoring();
- Set<DsInfo> maybeOffline = fsm.getDsMaybeOffline();
- for (DsInfo dsInfo : dsInfos.values()) {
- fsm.checkFailure(dsInfo.getDsId());
- if (restoring.contains(dsInfo)) {
- status = Status.RESTORING;
- } else if (maybeOffline.contains(dsInfo)) {
- status = Status.ARCHIVED;
- break;
- } else if (!emptyDatasets.contains(dsInfo.getDsId()) && !mainStorage.exists(dsInfo)) {
- status = Status.ARCHIVED;
- break;
- }
- }
- } else if (storageUnit == StorageUnit.DATAFILE) {
- Set<DfInfo> restoring = fsm.getDfRestoring();
- Set<DfInfo> maybeOffline = fsm.getDfMaybeOffline();
- for (DfInfo dfInfo : dfInfos) {
- fsm.checkFailure(dfInfo.getDfId());
- if (restoring.contains(dfInfo)) {
- status = Status.RESTORING;
- } else if (maybeOffline.contains(dfInfo)) {
- status = Status.ARCHIVED;
- break;
- } else if (!mainStorage.exists(dfInfo.getDfLocation())) {
- status = Status.ARCHIVED;
- break;
- }
- }
- }
+ Status status = calculateStatus(prepared.dsInfos, prepared.emptyDatasets, prepared.dfInfos);
logger.debug("Status is " + status.name());
@@ -1441,53 +1437,19 @@ public String getStatus(String sessionId, String investigationIds, String datase
validateUUID("sessionId", sessionId);
}
- // Do it
- Status status = Status.ONLINE;
-
+ DataSelection dataSelection = null;
if (storageUnit == StorageUnit.DATASET) {
- DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds,
- datafileIds, Returns.DATASETS);
- Map<Long, DsInfo> dsInfos = dataSelection.getDsInfo();
-
- Set<DsInfo> restoring = fsm.getDsRestoring();
- Set<DsInfo> maybeOffline = fsm.getDsMaybeOffline();
- Set<Long> emptyDatasets = dataSelection.getEmptyDatasets();
- for (DsInfo dsInfo : dsInfos.values()) {
- fsm.checkFailure(dsInfo.getDsId());
- if (restoring.contains(dsInfo)) {
- status = Status.RESTORING;
- } else if (maybeOffline.contains(dsInfo)) {
- status = Status.ARCHIVED;
- break;
- } else if (!emptyDatasets.contains(dsInfo.getDsId()) && !mainStorage.exists(dsInfo)) {
- status = Status.ARCHIVED;
- break;
- }
- }
+ dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, datafileIds, Returns.DATASETS);
} else if (storageUnit == StorageUnit.DATAFILE) {
- DataSelection dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds,
- datafileIds, Returns.DATAFILES);
- Set<DfInfoImpl> dfInfos = dataSelection.getDfInfo();
-
- Set<DfInfo> restoring = fsm.getDfRestoring();
- Set<DfInfo> maybeOffline = fsm.getDfMaybeOffline();
- for (DfInfo dfInfo : dfInfos) {
- fsm.checkFailure(dfInfo.getDfId());
- if (restoring.contains(dfInfo)) {
- status = Status.RESTORING;
- } else if (maybeOffline.contains(dfInfo)) {
- status = Status.ARCHIVED;
- break;
- } else if (!mainStorage.exists(dfInfo.getDfLocation())) {
- status = Status.ARCHIVED;
- break;
- }
- }
+ dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, datafileIds, Returns.DATAFILES);
} else {
- // Throw exception if selection does not exist
- new DataSelection(icat, sessionId, investigationIds, datasetIds, datafileIds, Returns.DATASETS);
+ // (required for single level storage)
+ // throw exception if selection does not exist
+ dataSelection = new DataSelection(icat, sessionId, investigationIds, datasetIds, datafileIds, Returns.DATASETS);
}
+ Status status = calculateStatus(dataSelection.getDsInfo(), dataSelection.getEmptyDatasets(), dataSelection.getDfInfo());
+
logger.debug("Status is " + status.name());
if (logSet.contains(CallType.INFO)) {
@@ -1511,6 +1473,74 @@ public String getStatus(String sessionId, String investigationIds, String datase
}
+ /**
+ * Work out the overall status of the specified datasets or datafiles.
+ * Note that for single level storage all items are assumed to be ONLINE.
+ * For two level storage, either dsInfos and emptyDatasets will be populated
+ * for StorageUnit.DATASET or dfInfos for StorageUnit.DATAFILE.
+ *
+ * @param dsInfos a map of DsInfo objects keyed on Dataset ID
+ * @param emptyDatasets a list of Dataset IDs where the Dataset contains no Datafiles
+ * @param dfInfos a set of DfInfo objects
+ * @return the overall status of the restore: ONLINE, RESTORING or ARCHIVED
+ * @throws InternalException if allowRestoreFailures is not set, thrown as soon as any
+ *                           DsInfo/DfInfo fails the checkFailure call; if
+ *                           allowRestoreFailures is set, thrown only when all of the
+ *                           DsInfos/DfInfos fail the check
+ */
+ private Status calculateStatus(Map<Long, DsInfo> dsInfos, Set<Long> emptyDatasets, Set<DfInfoImpl> dfInfos) throws InternalException {
+ Status status = Status.ONLINE;
+
+ if (storageUnit == null) {
+ // single level storage
+ // (assume all items ONLINE)
+ return status;
+ }
+
+ Set<Long> failedRestoreIds = new HashSet<>();
+ int numItemsRequested = 0;
+
+ if (storageUnit == StorageUnit.DATASET) {
+ Set<DsInfo> restoring = fsm.getDsRestoring();
+ Set<DsInfo> maybeOffline = fsm.getDsMaybeOffline();
+ numItemsRequested = dsInfos.size();
+ for (DsInfo dsInfo : dsInfos.values()) {
+ if (fsm.checkFailure(dsInfo.getDsId())) {
+ failedRestoreIds.add(dsInfo.getDsId());
+ } else if (restoring.contains(dsInfo)) {
+ status = Status.RESTORING;
+ } else if (maybeOffline.contains(dsInfo)) {
+ return Status.ARCHIVED;
+ } else if (!emptyDatasets.contains(dsInfo.getDsId()) && !mainStorage.exists(dsInfo)) {
+ return Status.ARCHIVED;
+ }
+ }
+ } else if (storageUnit == StorageUnit.DATAFILE) {
+ Set<DfInfo> restoring = fsm.getDfRestoring();
+ Set<DfInfo> maybeOffline = fsm.getDfMaybeOffline();
+ numItemsRequested = dfInfos.size();
+ for (DfInfo dfInfo : dfInfos) {
+ if (fsm.checkFailure(dfInfo.getDfId())) {
+ failedRestoreIds.add(dfInfo.getDfId());
+ } else if (restoring.contains(dfInfo)) {
+ status = Status.RESTORING;
+ } else if (maybeOffline.contains(dfInfo)) {
+ return Status.ARCHIVED;
+ } else if (!mainStorage.exists(dfInfo.getDfLocation())) {
+ return Status.ARCHIVED;
+ }
+ }
+ }
+
+ if (!failedRestoreIds.isEmpty() && failedRestoreIds.size() == numItemsRequested) {
+ // All datasets/datafiles failed to restore.
+ // Even with allowRestoreFailures set to true an exception needs to be thrown.
+ throw new InternalException("Restore failed");
+ }
+
+ return status;
+ }
+
@PostConstruct
private void init() {
try {
@@ -1621,18 +1651,21 @@ public Boolean isPrepared(String preparedId, String ip)
}
}
+ int numItemsRequested = 0;
if (storageUnit == StorageUnit.DATASET) {
+ numItemsRequested = preparedJson.dsInfos.size();
Collection<DsInfo> toCheck = status.fromDsElement == null ? preparedJson.dsInfos.values()
: preparedJson.dsInfos.tailMap(status.fromDsElement).values();
logger.debug("Will check online status of {} entries", toCheck.size());
for (DsInfo dsInfo : toCheck) {
- fsm.checkFailure(dsInfo.getDsId());
- if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) {
+ if (fsm.checkFailure(dsInfo.getDsId())) {
+ status.failedRestores.add(dsInfo.getDsId());
+ } else if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) {
prepared = false;
status.fromDsElement = dsInfo.getDsId();
toCheck = preparedJson.dsInfos.tailMap(status.fromDsElement).values();
logger.debug("Will check in background status of {} entries", toCheck.size());
- status.future = threadPool.submit(new RunPrepDsCheck(toCheck, preparedJson.emptyDatasets));
+ status.future = threadPool.submit(new RunPrepDsCheck(toCheck, preparedJson.emptyDatasets, status));
break;
}
}
@@ -1641,24 +1674,27 @@ public Boolean isPrepared(String preparedId, String ip)
: preparedJson.dsInfos.headMap(status.fromDsElement).values();
logger.debug("Will check finally online status of {} entries", toCheck.size());
for (DsInfo dsInfo : toCheck) {
- fsm.checkFailure(dsInfo.getDsId());
- if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) {
+ if (fsm.checkFailure(dsInfo.getDsId())) {
+ status.failedRestores.add(dsInfo.getDsId());
+ } else if (restoreIfOffline(dsInfo, preparedJson.emptyDatasets)) {
prepared = false;
}
}
}
} else if (storageUnit == StorageUnit.DATAFILE) {
+ numItemsRequested = preparedJson.dfInfos.size();
SortedSet<DfInfoImpl> toCheck = status.fromDfElement == null ? preparedJson.dfInfos
: preparedJson.dfInfos.tailSet(status.fromDfElement);
logger.debug("Will check online status of {} entries", toCheck.size());
for (DfInfoImpl dfInfo : toCheck) {
- fsm.checkFailure(dfInfo.getDfId());
- if (restoreIfOffline(dfInfo)) {
+ if (fsm.checkFailure(dfInfo.getDfId())) {
+ status.failedRestores.add(dfInfo.getDfId());
+ } else if (restoreIfOffline(dfInfo)) {
prepared = false;
status.fromDfElement = dfInfo;
toCheck = preparedJson.dfInfos.tailSet(status.fromDfElement);
logger.debug("Will check in background status of {} entries", toCheck.size());
- status.future = threadPool.submit(new RunPrepDfCheck(toCheck));
+ status.future = threadPool.submit(new RunPrepDfCheck(toCheck, status));
break;
}
}
@@ -1667,14 +1703,19 @@ public Boolean isPrepared(String preparedId, String ip)
: preparedJson.dfInfos.headSet(status.fromDfElement);
logger.debug("Will check finally online status of {} entries", toCheck.size());
for (DfInfoImpl dfInfo : toCheck) {
- fsm.checkFailure(dfInfo.getDfId());
- if (restoreIfOffline(dfInfo)) {
+ if (fsm.checkFailure(dfInfo.getDfId())) {
+ status.failedRestores.add(dfInfo.getDfId());
+ } else if (restoreIfOffline(dfInfo)) {
prepared = false;
}
}
}
}
+ if (!status.failedRestores.isEmpty() && status.failedRestores.size() == numItemsRequested) {
+ throw new InternalException("Restore failed");
+ }
+
if (logSet.contains(CallType.INFO)) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (JsonGenerator gen = Json.createGenerator(baos).writeStartObject()) {
diff --git a/src/main/java/org/icatproject/ids/PropertyHandler.java b/src/main/java/org/icatproject/ids/PropertyHandler.java
index b93ade5a..68872d38 100644
--- a/src/main/java/org/icatproject/ids/PropertyHandler.java
+++ b/src/main/java/org/icatproject/ids/PropertyHandler.java
@@ -71,6 +71,8 @@ public static Logger getLogger() {
private StorageUnit storageUnit;
private long delayDatasetWrites;
private long delayDatafileOperations;
+ private boolean allowRestoreFailures;
+ private String missingFilesZipEntryName;
private ZipMapperInterface zipMapper;
private int tidyBlockSize;
private String key;
@@ -244,6 +246,9 @@ private PropertyHandler() {
logger.info("'log.list' entry not present so no JMS call logging will be performed");
}
+ allowRestoreFailures = props.getBoolean("allowRestoreFailures", false);
+ missingFilesZipEntryName = props.getString("missingFilesZipEntryName", "MISSING_FILES.txt");
+
} catch (CheckedPropertyException e) {
abort(e.getMessage());
}
@@ -417,4 +422,12 @@ public org.icatproject.icat.client.ICAT getRestIcat() {
return restIcat;
}
+ public boolean getAllowRestoreFailures() {
+ return allowRestoreFailures;
+ }
+
+ public String getMissingFilesZipEntryName() {
+ return missingFilesZipEntryName;
+ }
+
}
diff --git a/src/main/java/org/icatproject/ids/thread/DsRestorer.java b/src/main/java/org/icatproject/ids/thread/DsRestorer.java
index d3bd52b5..1ae059f1 100644
--- a/src/main/java/org/icatproject/ids/thread/DsRestorer.java
+++ b/src/main/java/org/icatproject/ids/thread/DsRestorer.java
@@ -45,6 +45,8 @@ public class DsRestorer implements Runnable {
private ZipMapperInterface zipMapper;
private Lock lock;
+ private boolean allowRestoreFailures;
+
public DsRestorer(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMachine fsm, IcatReader reader, Lock lock) {
this.dsInfo = dsInfo;
this.fsm = fsm;
@@ -54,6 +56,7 @@ public DsRestorer(DsInfo dsInfo, PropertyHandler propertyHandler, FiniteStateMac
datasetCache = propertyHandler.getCacheDir().resolve("dataset");
this.reader = reader;
this.lock = lock;
+ this.allowRestoreFailures = propertyHandler.getAllowRestoreFailures();
}
@Override
@@ -98,19 +101,20 @@ public void run() {
while (ze != null) {
String dfName = zipMapper.getFileName(ze.getName());
if (seen.contains(dfName)) {
- throw new RuntimeException("Corrupt archive for " + dsInfo + ": duplicate entry " + dfName);
+ logWarningOrThrowException("Corrupt archive for " + dsInfo + ": duplicate entry " + dfName);
}
String location = nameToLocalMap.get(dfName);
if (location == null) {
- throw new RuntimeException("Corrupt archive for " + dsInfo + ": spurious entry " + dfName);
+ logWarningOrThrowException("Corrupt archive for " + dsInfo + ": spurious entry " + dfName);
+ } else {
+ mainStorageInterface.put(zis, location);
}
- mainStorageInterface.put(zis, location);
ze = zis.getNextEntry();
seen.add(dfName);
}
zis.close();
if (!seen.equals(nameToLocalMap.keySet())) {
- throw new RuntimeException("Corrupt archive for " + dsInfo + ": missing entries");
+ logWarningOrThrowException("Corrupt archive for " + dsInfo + ": missing entries");
}
Files.delete(datasetCachePath);
fsm.recordSuccess(dsInfo.getDsId());
@@ -127,4 +131,13 @@ public void run() {
lock.release();
}
}
+
+ private void logWarningOrThrowException(String message) {
+ if (allowRestoreFailures) {
+ logger.warn(message);
+ } else {
+ throw new RuntimeException(message);
+ }
+ }
+
}
diff --git a/src/site/xhtml/installation.xhtml.vm b/src/site/xhtml/installation.xhtml.vm
index f1778be3..a684121d 100644
--- a/src/site/xhtml/installation.xhtml.vm
+++ b/src/site/xhtml/installation.xhtml.vm
@@ -149,6 +149,25 @@
The number of literal id values to be generated in an ICAT
query. For Oracle this must not exceed 1000.
+ allowRestoreFailures
+ Optional. If true, allows a restore request to complete even if
+ there are files which cannot be restored from archive storage. Only
+ if all the files requested are not found will an error condition be
+ returned. Otherwise the restore will proceed and the downloaded zip
+ file will contain a file listing the files that are missing (see
+ property missingFilesZipEntryName). WARNING: this must only be used
+ if the IDS is being used with the property readOnly set to true,
+ otherwise it may result in archived datasets becoming inconsistent.
+
+
+ missingFilesZipEntryName
+ Optional. Specifies the filename (and path if required) of a file
+ which will be added to the download zip file if
+ allowRestoreFailures is set to true and there are failures for a
+ request. If not specified, the default of MISSING_FILES.txt will be
+ used with the file appearing in the root of the zip file. The file
+ can also be put in a subfolder using e.g. path/to/FILENAME.txt.
+
log.list
Optional. If present it specifies a set of call types to log
via JMS calls. The types are specified by a space separated list of
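
As an illustration of the behaviour described above, a client could detect a partial restore by looking for the missing-files entry in the downloaded zip. This is only a sketch, not part of this change: the MissingFilesCheck class is hypothetical and it assumes the default entry name MISSING_FILES.txt (i.e. missingFilesZipEntryName has not been overridden).

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;

    // Hypothetical client-side helper: prints the files that the IDS could not restore.
    public class MissingFilesCheck {

        public static void main(String[] args) throws IOException {
            // args[0] is the path of a zip file downloaded from the IDS
            try (ZipFile zip = new ZipFile(args[0])) {
                // MISSING_FILES.txt is the documented default of missingFilesZipEntryName
                ZipEntry entry = zip.getEntry("MISSING_FILES.txt");
                if (entry == null) {
                    System.out.println("All requested files were present in the zip");
                    return;
                }
                // the entry contains a header line followed by one missing file per line
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(zip.getInputStream(entry)))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        System.out.println(line);
                    }
                }
            }
        }
    }
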
diff --git a/src/test/java/org/icatproject/ids/TestSO.java b/src/test/java/org/icatproject/ids/TestSO.java
new file mode 100644
index 00000000..ee34535e
--- /dev/null
+++ b/src/test/java/org/icatproject/ids/TestSO.java
@@ -0,0 +1,231 @@
+package org.icatproject.ids;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+import org.icatproject.ids.IdsBean.SO;
+import org.icatproject.ids.LockManager.Lock;
+import org.icatproject.ids.plugin.DfInfo;
+import org.icatproject.ids.plugin.DsInfo;
+import org.icatproject.ids.plugin.MainStorageInterface;
+import org.icatproject.ids.plugin.ZipMapperInterface;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Class to test the implementation of the SO (StreamingOutput) inner class in
+ * IdsBean which creates the zip files that are downloaded from the IDS
+ * containing the files which the user requested.
+ *
+ * The tests aim to check two things:
+ *
+ * 1) that the original download functionality of the IDS is not altered by the
+ * addition of the allowRestoreFailures option. The zip file containing the
+ * files requested should be returned if they all exist on main storage, or
+ * an IOException should be thrown if any are missing.
+ * 2) that the allowRestoreFailures functionality works as intended when
+ * enabled. If all files requested are present on the main storage then a
+ * zip file containing them should be returned. If any files were not found
+ * on the main storage, then those files should be listed in an additional
+ * file which also gets added to the zip. The default name of the file is
+ * MISSING_FILES.txt and will be placed in the root of the zip file but the
+ * property missingFilesZipEntryName can be set to another filename, with
+ * path if desired.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestSO {
+
+ @Mock
+ private PropertyHandler propertyHandler;
+ @Mock
+ private MainStorageInterface mainStorage;
+ @Mock
+ private ZipMapperInterface zipMapper;
+ @Mock
+ private Transmitter transmitter;
+ @Mock
+ private Lock lock;
+ @InjectMocks
+ private IdsBean idsBean;
+
+ private static Path tmpDirPath = Paths.get(System.getProperty("java.io.tmpdir"));
+ private static Path tmpTestDirPath = tmpDirPath.resolve("IDS_TestSO_testing");
+ private File tempZipFile;
+
+ @BeforeClass
+ public static void setupClass() throws Exception {
+ // create a temporary test directory structure containing a few files
+ int numFiles = 5;
+ List<Path> pathList = new ArrayList<>();
+ for (int i=1; i<=numFiles; i++) {
+ Path path = tmpTestDirPath.resolve("subdir" + i + "/" + "file" + i + ".txt");
+ pathList.add(path);
+ Files.createDirectories(path.getParent());
+ OutputStream out = Files.newOutputStream(path);
+ byte[] fileBytes = new String("Contents of " + path.getFileName()).getBytes();
+ out.write(fileBytes);
+ out.close();
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ when(mainStorage.get(any(), any(), any())).thenAnswer(new Answer<InputStream>() {
+ public InputStream answer(InvocationOnMock invocation) throws Throwable {
+ String location = invocation.getArgument(0);
+ return new FileInputStream(location);
+ }
+ });
+
+ when(zipMapper.getFullEntryName(any(), any())).thenAnswer(new Answer<String>() {
+ public String answer(InvocationOnMock invocation) throws Throwable {
+ DfInfo dfInfo = invocation.getArgument(1);
+ return dfInfo.getDfLocation();
+ }
+ });
+
+ when(propertyHandler.getMissingFilesZipEntryName()).thenReturn("MISSING_FILES.txt");
+ }
+
+ // test that the pre-getAllowRestoreFailures behaviour still works
+ // ie. a zip file is created when all files are present
+ @Test
+ public void testSuccessfulNormalZipCreation() throws Exception {
+ when(propertyHandler.getAllowRestoreFailures()).thenReturn(false);
+ List<Path> pathList = testZipCreation(false);
+ assertTrue(checkZipContents(pathList, false));
+ }
+
+ // test that the pre-getAllowRestoreFailures behaviour still works
+ // ie. an exception is thrown if any files are not found on main storage
+ @Test(expected = IOException.class)
+ public void testFailedNormalZipCreation() throws Exception {
+ when(propertyHandler.getAllowRestoreFailures()).thenReturn(false);
+ testZipCreation(true);
+ }
+
+ // test that the getAllowRestoreFailures functionality works if there are no files missing
+ @Test
+ public void testMissingFilesZipCreationNoneMissing() throws Exception {
+ when(propertyHandler.getAllowRestoreFailures()).thenReturn(true);
+ List<Path> pathList = testZipCreation(false);
+ assertTrue(checkZipContents(pathList, false));
+ }
+
+ // test that the getAllowRestoreFailures functionality works when there are files missing
+ @Test
+ public void testMissingFilesZipCreationFilesMissing() throws Exception {
+ when(propertyHandler.getAllowRestoreFailures()).thenReturn(true);
+ List<Path> pathList = testZipCreation(true);
+ assertTrue(checkZipContents(pathList, true));
+ }
+
+ // test that the getAllowRestoreFailures functionality works when there are files missing
+ // and additionally check that a file path to the missing files listing can be used
+ @Test
+ public void testMissingFilesZipCreationFilesMissingWithPath() throws Exception {
+ when(propertyHandler.getAllowRestoreFailures()).thenReturn(true);
+ when(propertyHandler.getMissingFilesZipEntryName()).thenReturn("path/to/MISSING_FILES2.txt");
+ List<Path> pathList = testZipCreation(true);
+ assertTrue(checkZipContents(pathList, true));
+ }
+
+ private List<Path> testZipCreation(boolean addMissingFile) throws Exception {
+ tempZipFile = tmpDirPath.resolve("IDS_TestSO_" + System.currentTimeMillis() + ".zip").toFile();
+ // create a list of the files created in the class setup
+ List<Path> pathList = new ArrayList<>();
+ Files.walk(tmpTestDirPath)
+ .filter(Files::isRegularFile)
+ .forEach(pathList::add);
+ // create a dummy dsInfo for the dsInfos to reference (by ID)
+ DsInfo dsInfo1 = new DsInfoImpl(1L, "dsName", "dsLocation", 1L, "invName", "visitId", 1L, "facilityName");
+ Map<Long, DsInfo> dsInfos = new HashMap<>();
+ dsInfos.put(dsInfo1.getDsId(), dsInfo1);
+ // create a set of dfInfos with each referencing the dsInfo created
+ Set<DfInfoImpl> dfInfos = new HashSet<>();
+ Long dfId = 0L;
+ for (Path path : pathList) {
+ dfId++;
+ DfInfoImpl dfInfo = new DfInfoImpl(dfId, null, path.toString(), null, null, 1L);
+ dfInfos.add(dfInfo);
+ }
+ if (addMissingFile) {
+ // add an extra file path that does not exist to the list of files to be zipped
+ dfId++;
+ Path missingFilePath = tmpTestDirPath.resolve("non_existent_file.txt");
+ DfInfoImpl dfInfo = new DfInfoImpl(dfId, null, missingFilePath.toString(), null, null, 1L);
+ dfInfos.add(dfInfo);
+ }
+ // create the IdsBean SO (StreamingOutput) inner class
+ SO so = idsBean.new SO(dsInfos, dfInfos, 0L, true, true, lock, 0L, "ip", 0L);
+ // write the streamed zip file to the system temp dir
+ FileOutputStream fos = new FileOutputStream(tempZipFile);
+ so.write(fos);
+ fos.close();
+ return pathList;
+ }
+
+ private boolean checkZipContents(List<Path> pathList, boolean shouldContainMissingFilesList) throws IOException {
+ if (shouldContainMissingFilesList) {
+ pathList.add(Paths.get(propertyHandler.getMissingFilesZipEntryName()));
+ }
+ int origPathListSize = pathList.size();
+ ZipInputStream zis = new ZipInputStream(new FileInputStream(tempZipFile));
+ int numZipEntries = 0;
+ ZipEntry entry;
+ while ((entry = zis.getNextEntry()) != null) {
+ numZipEntries++;
+ pathList.remove(Paths.get(entry.getName()));
+ }
+ zis.close();
+ if (origPathListSize == numZipEntries && pathList.size() == 0) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ tempZipFile.delete();
+ }
+
+ @AfterClass
+ public static void tearDownClass() throws Exception {
+ // delete the test directory structure
+ // note the sorting is important to ensure that the
+ // parent directory is empty when it gets deleted
+ Files.walk(tmpTestDirPath)
+ .map(Path::toFile)
+ .sorted((o1, o2) -> -o1.compareTo(o2))
+ .forEach(File::delete);
+ }
+}
\ No newline at end of file