Don't remove creatures until the archive has been written to S3

This commit is contained in:
Thaum Rystra
2024-03-26 13:40:58 +02:00
parent 3a6639fbc3
commit a3ea5e1408
3 changed files with 30 additions and 9 deletions

View File

@@ -1,3 +1,4 @@
import { Meteor } from 'meteor/meteor';
import SCHEMA_VERSION from '/imports/constants/SCHEMA_VERSION';
import SimpleSchema from 'simpl-schema';
import { ValidatedMethod } from 'meteor/mdg:validated-method';
@@ -32,7 +33,7 @@ export function getArchiveObj(creatureId) {
return archiveCreature;
}
export function archiveCreature(creatureId) {
export const archiveCreature = Meteor.wrapAsync(function archiveCreatureFn(creatureId, callback) {
const archive = getArchiveObj(creatureId);
const buffer = Buffer.from(JSON.stringify(archive, null, 2));
ArchiveCreatureFiles.write(buffer, {
@@ -44,14 +45,29 @@ export function archiveCreature(creatureId) {
creatureId: archive.creature._id,
creatureName: archive.creature.name,
},
}, (error) => {
if (error) {
throw error;
}, (error, fileRef) => {
if (error || !Meteor.settings.useS3) {
// If there is an error, or we aren't using s3, just call the callback
callback(error);
} else {
removeCreatureWork(creatureId);
// Wait for s3Result event that occurs when the s3 attempt to write ends.
// If it's successful, remove the creature, otherwise callback with error
const resultHandler = (s3Error, resultRef) => {
// This event is for a different file, ignore it
if (resultRef._id !== fileRef._id) return;
// Remove this handler, we are only running it once for this fileId
ArchiveCreatureFiles.off('s3Result', resultHandler);
// Remove the creature if there was no error
if (!s3Error) {
removeCreatureWork(creatureId);
}
// Alert the callback that we're done
callback(s3Error);
}
ArchiveCreatureFiles.on('s3Result', resultHandler);
}
}, true);
}
});
const archiveCreatureToFile = new ValidatedMethod({
name: 'Creatures.methods.archiveCreatureToFile',

View File

@@ -44,6 +44,7 @@ if (Meteor.settings.useS3) {
accessKeyId: s3Conf.key,
secretAccessKey: s3Conf.secret,
},
region: 'ENAM',
endpoint: s3Conf.endpoint,
tls: true,
maxAttempts: 10,
@@ -96,6 +97,7 @@ if (Meteor.settings.useS3) {
}, (error: Error) => {
bound(() => {
if (error) {
this.emit('s3Result', error, fileRef);
return console.error(error);
}
// Update FilesCollection with link to the file at AWS
@@ -111,10 +113,12 @@ if (Meteor.settings.useS3) {
_id: fileRef._id
}, upd, undefined, (updError: any) => {
if (updError) {
this.emit('s3Result', updError, fileRef);
console.error(updError);
} else {
// Unlink original files from FS after successful upload to AWS:S3
filesCollection.unlink(filesCollection.findOne(fileRef._id), version);
this.emit('s3Result', undefined, fileRef)
}
});
});
@@ -126,7 +130,7 @@ if (Meteor.settings.useS3) {
// And redirect request to AWS:S3
let path;
if (fileRef && fileRef.versions && fileRef.versions[version] && fileRef.versions[version].meta && fileRef.versions[version].meta.pipePath) {
if (fileRef?.versions?.[version]?.meta?.pipePath) {
path = fileRef.versions[version].meta.pipePath;
}
@@ -195,7 +199,7 @@ if (Meteor.settings.useS3) {
const cursor = this.collection.find(search);
cursor.forEach((fileRef) => {
each(fileRef.versions, (vRef) => {
if (vRef && vRef.meta && vRef.meta.pipePath) {
if (vRef?.meta?.pipePath) {
// Remove the object from AWS:S3 first, then we will call the original FilesCollection remove
s3.deleteObject({
Bucket: s3Conf.bucket,
@@ -224,7 +228,7 @@ if (Meteor.settings.useS3) {
Key: path
});
if (!data.Body) return;
return JSON.parse(data.Body.toString());
return JSON.parse(await data.Body.transformToString());
} else {
// Otherwise use the normal filesystem
const fileString = await fsp.readFile(file.path, 'utf8');

View File

@@ -273,6 +273,7 @@ const ComputedOnlyAttributeSchema = createPropertySchema({
type: String,
},
'definitions.$.type': {
optional: true,
type: String,
},
'definitions.$.row': {