Making a distinction between "file not present" and "access denied" while accessing an S3 object via JavaScript

I have inherited the following code. It is part of a CI/CD pipeline. It tries to get an object called "changes" from a bucket and does something with it. If it is able to grab the object, it sends a success message back to the pipeline. If it fails to grab the file for whatever reason, it sends a failure message back to CodePipeline.
This "changes" file is created in a previous step of the pipeline. However, sometimes it is valid for this file NOT to exist (i.e. when there IS no change).
Currently, the code makes no distinction between the file simply not existing and the code failing to get it for some other reason (access denied, etc.).
Desired:
I would like to send a success message back to CodePipeline if the file is simply not there.
If there is an access issue, then the current outcome of "failure" would still be valid.
Any help is greatly appreciated. Unfortunately I am not good enough with JavaScript to have any ideas to try.
RELEVANT PARTS OF THE CODE
const AWS = require("aws-sdk");
const s3 = new AWS.S3();
const lambda = new AWS.Lambda();
const codePipeline = new AWS.CodePipeline();

// GET THESE FROM ENV Variables
const {
  API_SOURCE_S3_BUCKET: s3Bucket,
  ENV: env
} = process.env;

const jobSuccess = (CodePipeline, params) => {
  return new Promise((resolve, reject) => {
    CodePipeline.putJobSuccessResult(params, (err, data) => {
      if (err) { reject(err); }
      else { resolve(data); }
    });
  });
};

const jobFailure = (CodePipeline, params) => {
  return new Promise((resolve, reject) => {
    CodePipeline.putJobFailureResult(params, (err, data) => {
      if (err) { reject(err); }
      else { resolve(data); }
    });
  });
};

// MAIN CALLER FUNCTION. STARTING POINT
// NOTE: getObject, version, and jobId are defined in parts of the source omitted here.
exports.handler = async (event, context, callback) => {
  try {
    // WHAT IS IN changes file in S3
    let changesFile = await getObject(s3, s3Bucket, `lambda/${version}/changes`);
    let changes = changesFile.trim().split("\n");
    console.log("List of Changes");
    console.log(changes);
    let params = { jobId };
    let jobSuccessResponse = await jobSuccess(codePipeline, params);
    context.succeed("Job Success");
  }
  catch (exception) {
    let message = "Job Failure (General)";
    let failureParams = {
      jobId,
      failureDetails: {
        message: JSON.stringify(message),
        type: "JobFailed",
        externalExecutionId: context.invokeid
      }
    };
    let jobFailureResponse = await jobFailure(codePipeline, failureParams);
    console.log(message, exception);
    context.fail(`${message}: ${exception}`);
  }
};

S3 should return an error code in the exception. The ones you care about are:
AccessDenied - Access Denied
NoSuchKey - The specified key does not exist.
So in your catch block you should be able to check exception.code to see whether it matches one of these two.
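As a minimal sketch (reusing the jobSuccess/jobFailure helpers and the jobId from the question; NoSuchKey and AccessDenied are the error-code names the S3 API uses), the handler's try/catch could become:
try {
  let changesFile = await getObject(s3, s3Bucket, `lambda/${version}/changes`);
  let changes = changesFile.trim().split("\n");
  console.log(changes);
  await jobSuccess(codePipeline, { jobId });
  context.succeed("Job Success");
}
catch (exception) {
  if (exception.code === "NoSuchKey") {
    // The "changes" file legitimately does not exist: nothing changed,
    // so report success back to CodePipeline.
    await jobSuccess(codePipeline, { jobId });
    context.succeed("Job Success (no changes file)");
    return;
  }
  // AccessDenied and every other error keeps the existing failure path.
  await jobFailure(codePipeline, {
    jobId,
    failureDetails: {
      message: JSON.stringify("Job Failure (General)"),
      type: "JobFailed",
      externalExecutionId: context.invokeid
    }
  });
  context.fail(`Job Failure (General): ${exception}`);
}
One caveat: if the Lambda role lacks s3:ListBucket on the bucket, S3 reports a missing key as AccessDenied rather than NoSuchKey, so the role needs that permission for the two cases to stay distinguishable.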

Related

How do I use JavaScript to call the AWS Textract service to upload a local photo for identification (without S3)

I want to call the AWS Textract service from JavaScript to identify the numbers in a local photo (without S3), and I get an error:
TypeError: Cannot read property 'byteLength' of undefined at client.send(command)
I tried to find the correct sample in the AWS SDK for JavaScript v3 official documentation but couldn't find it.
I want to know how to modify the code to call this service.
This is my code
const {
  TextractClient,
  AnalyzeDocumentCommand
} = require("@aws-sdk/client-textract");

// Set the AWS region
const REGION = "us-east-2"; // The AWS Region. For example, "us-east-1".

var fs = require("fs");
var res;
var imagedata = fs.readFileSync('./1.png');
res = imagedata.toString('base64');
console.log("res2");
console.log(typeof(res));
// console.log(res)

const client = new TextractClient({ region: REGION });
const params = {
  Document: {
    Bytes: res
  }
};
console.log("params");
console.log(typeof(params));
// console.log(params)

const command = new AnalyzeDocumentCommand(params);
console.log("command");
console.log(typeof(command));

const run = async () => {
  // async/await.
  try {
    const data = await client.send(command);
    console.log(data);
    // process data.
  } catch (error) {
    console.log("Error");
    console.log(error);
    // error handling.
  } finally {
    // finally.
  }
};
run();
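One likely culprit, offered as a guess rather than a confirmed fix: in the v3 SDK, Document.Bytes is a binary blob, so passing the raw Buffer from readFileSync (instead of a base64 string) is the safer input, and the string may be what trips the byteLength error. A minimal sketch of that change, with the same file path and region as above:
const { TextractClient, AnalyzeDocumentCommand } = require("@aws-sdk/client-textract");
const fs = require("fs");

const client = new TextractClient({ region: "us-east-2" });

const run = async () => {
  try {
    const data = await client.send(new AnalyzeDocumentCommand({
      Document: {
        // Raw bytes from disk; no base64 conversion.
        Bytes: fs.readFileSync("./1.png")
      }
    }));
    console.log(data.Blocks);
  } catch (error) {
    console.log(error);
  }
};
run();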

AWS Kendra SDK call will not return results

I have been following the AWS-Kendra react-search app example you can find here:
https://docs.aws.amazon.com/kendra/latest/dg/deploying.html
After importing the Kendra client with:
const kendra = require('aws-sdk/clients/kendra');
const kendraClient = new kendra({apiVersion: '2019-02-03', region: 'us-east-1'});
Any call on kendraClient to any of the Kendra operations returns null. I have been executing queries with:
const results = kendraClient.query({ IndexId: INDEX_ID, QueryText: queryText});
Which returns a request object with null data and error fields.
I have calls to S3 that execute correctly in the same file, so I do not believe it to be an authentication problem. If I had to guess, it's some issue with how I created the Kendra object and client; the usual
kendra = new AWS.Kendra();
doesn't work because Kendra is not part of the browser version of the SDK.
Are you trying to run JS directly from the browser? Here is a sample Node.js code:
var kendra = require("aws-sdk/clients/kendra");
var kendraClient = new kendra({ apiVersion: "2019-02-03", region: "us-west-2" });

exports.handler = function (event) {
  try {
    console.log("Starting....");
    var params = {
      IndexId: "<<Enter your indexId here>>",
      QueryText: "<<Enter your queryText here>>",
      QueryResultTypeFilter: "DOCUMENT",
      PageNumber: 1
    };
    var kendraResponse = kendraClient.query(params, function (err, data) {
      if (err) console.log(err, err.stack); // an error occurred
      else console.log("Kendra result is", data); // successful response
    });
    const response = {
      "dialogAction": {
        "fulfillmentState": "Fulfilled",
        "type": "Close",
        "message": {
          "contentType": "PlainText"
        }
      }
    };
    return response;
  } catch (error) {
    console.log(error);
  }
};
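Note also that in the v2 SDK, calling query(params) with no callback only builds an AWS.Request object; nothing is sent until you attach a callback or call .send() or .promise() on it, which would explain the null data and error fields seen in the question. A promise-based sketch of the same query (the index ID is a placeholder):
var kendra = require("aws-sdk/clients/kendra");
var kendraClient = new kendra({ apiVersion: "2019-02-03", region: "us-east-1" });

async function search(queryText) {
  // .promise() actually dispatches the request and resolves with the response.
  const results = await kendraClient.query({
    IndexId: "<<Enter your indexId here>>",
    QueryText: queryText
  }).promise();
  return results.ResultItems;
}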

File metadata is not getting updated in Firebase Storage

I have created a Cloud Function that triggers on any new file upload to Firebase Storage. After a successful upload the function updates the file's metadata, but even though the new metadata is set with setMetadata(), it does not get applied. There is no error during the process, but on checking the updated metadata, the new values are not reflected.
exports.onImageUpload = functions.storage.object().onFinalize(async (object) => {
  const storageRef = admin.storage().bucket(object.bucket);
  var metadata = {
    'uploader': 'unknown'
  };
  await storageRef.file(object.name).setMetadata(metadata).then(function (data) {
    console.log('Success');
    console.log(data);
    return;
  }).catch(function (error) {
    console.log(error);
    return;
  });
  return;
});
There is no error, and the Cloud Function log prints the 'Success' message. The metageneration property was also bumped to '2', which suggests the metadata should have been updated with the new values, but it wasn't.
The problem comes from the fact that if you want to set custom key/value pairs, they must be nested under the metadata key of the object you pass to the setMetadata() method, i.e. inside the metadata object in your case. This is explained in the API Reference Documentation for Node.js.
So the following will work:
exports.onImageUpload = functions.storage.object().onFinalize(async (object) => {
  const storageRef = admin.storage().bucket(object.bucket);
  var metadata = {
    metadata: {
      'uploader': 'unknown'
    }
  };
  try {
    const setFileMetadataResponse = await storageRef.file(object.name).setMetadata(metadata);
    console.log('Success');
    console.log(setFileMetadataResponse[0]);
    return null;
  } catch (error) {
    console.log(error);
    return null;
  }
});
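To confirm the custom key landed, you could read the metadata back in the same function (a small sketch; getMetadata() is part of the same Node.js client and resolves to a [metadata, apiResponse] pair):
const [md] = await storageRef.file(object.name).getMetadata();
console.log(md.metadata); // expected: { uploader: 'unknown' }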

returning status once the file is written to local from s3 bucket

I am trying to fetch a file from an S3 bucket and store it locally; once it is written locally, I read the file from disk, convert the data to JSON format, and send it.
I need to check whether the file has been downloaded and written locally; only once the file exists should I read it and convert it to JSON, otherwise I should send an error message.
I write the file in the stream's open handler and then call end(), so after end I can't send a return value. How can I solve this and use try/catch to send a proper error message?
const fetchFileDownloadAndWriteIt = () => {
  let Bucket = "DataBucket";
  let filename = "sample_data.csv";
  let s3 = new AWS.S3();
  const params = {
    Bucket: Bucket,
    Key: filename
  };
  return s3.getObject(params)
    .promise()
    .then(data => {
      const file = fs.createWriteStream('./localdata/' + filename);
      file.on("open", () => {
        file.write(data.Body);
        file.end();
      })
      .on("error", err => {
        console.log("Error occurred while writing", err.message);
      });
    })
    .catch(err => {
      console.log("unable to fetch file from s3 Bucket", err.message);
    });
};
exports.fetchData = async (req, res) => {
  let fileDownloadAndWrite = await fetchFileDownloadAndWriteIt();
  // need to check file is downloaded and written properly
  const path = "./localdata/sample_data.csv";
  const json = await csv().fromFile(path);
  res.send({ data: json });
};
You can return a new Promise instead of the one you get by calling the SDK's API.
return new Promise((res, rej) => {
  s3.getObject(params)
    .promise()
    .then(data => {
      const file = fs.createWriteStream('./localdata/' + filename);
      file
        .on("open", () => {
          file.write(data.Body);
          file.end();
          // success
          res();
        })
        .on("error", err => {
          rej(err);
        });
    })
    .catch(err => {
      rej(err);
    });
});
This promise will resolve to undefined and reject with the proper error that occurred, e.g. while writing the file.
How to Call it in your handler?
Something like this would be fine.
exports.fetchData = async (req, res, next) => {
  try {
    await fetchFileDownloadAndWriteIt();
    // need to check file is downloaded and written properly - here the file is actually downloaded and written properly.
    const path = "./localdata/sample_data.csv";
    const json = await csv().fromFile(path);
    res.send({ data: json });
  }
  catch (err) {
    return next(err);
  }
};
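As an aside, the same download can be written without the manual open/write/end dance by piping the SDK's read stream into the file stream (a sketch using the same bucket and key; createReadStream() is the v2 SDK's streaming interface):
const fs = require("fs");
const AWS = require("aws-sdk");
const s3 = new AWS.S3();

const downloadToLocal = (Bucket, Key, dest) =>
  new Promise((resolve, reject) => {
    s3.getObject({ Bucket, Key })
      .createReadStream()
      .on("error", reject) // S3/read-side errors
      .pipe(fs.createWriteStream(dest))
      .on("error", reject) // write-side errors
      .on("finish", resolve); // file fully on disk
  });

// e.g. await downloadToLocal("DataBucket", "sample_data.csv", "./localdata/sample_data.csv");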

ECONNRESET socket hang up

I have a function that triggers on Firebase database onWrite. The function body uses two Google Cloud APIs (DNS and Storage).
While the function runs and works as expected (mostly), the issue is that the socket hangs up more often than I'd like (about 50% of the time).
My questions are:
Is this similar to what other testers have experienced? Is it a well-known outstanding issue, or expected behavior?
The example code is as follows:
const functions = require('firebase-functions');
const admin = require('firebase-admin');
const {credentials} = functions.config().auth;
credentials.private_key = credentials.private_key.replace(/\\n/g, '\n');
const config = Object.assign({}, functions.config().firebase, {credentials});
admin.initializeApp(config);
const gcs = require('@google-cloud/storage')({credentials});
const dns = require('@google-cloud/dns')({credentials});
const zoneName = 'applambda';
const zone = dns.zone(zoneName);

exports.createDeleteDNSAndStorage = functions.database.ref('/apps/{uid}/{appid}/name')
  .onWrite(event => {
    // Only edit data when it is first created.
    const {uid, appid} = event.params;
    const name = event.data.val();
    const dbRef = admin.database().ref(`/apps/${uid}/${appid}`);
    if (event.data.previous.exists()) {
      console.log(`already exists ${uid}/${appid}`);
      return;
    }
    // Exit when the data is deleted.
    if (!event.data.exists()) {
      console.log(`data is being deleted ${uid}/${appid}`);
      return;
    }
    const url = `${name}.${zoneName}.com`;
    console.log(`data: ${uid}/${appid}/${name}\nsetting up: ${url}`);
    setupDNS({url, dbRef});
    setupStorage({url, dbRef});
    return;
  });

function setupDNS({url, dbRef}) {
  // Create a CNAME record.
  let cnameRecord = zone.record('cname', {
    name: `${url}.`,
    data: 'c.storage.googleapis.com.',
    ttl: 3000
  });
  zone.addRecords(cnameRecord).then(function() {
    console.log(`done setting up zonerecord for ${url}`);
    dbRef.update({dns: url}).then(res => console.log(res)).catch(err => console.log(err));
  }).catch(function(err) {
    console.error(`error setting up zonerecord for ${url}`);
    console.error(err);
  });
}

function setupStorage({url, dbRef}) {
  console.log(`setting up storage bucket for ${url}`);
  gcs.createBucket(url, {
    website: {
      mainPageSuffix: `https://${url}`,
      notFoundPage: `https://${url}/404.html`
    }
  }).then(function(res) {
    let bucket = res[0];
    console.log(`created bucket ${url}, setting it as public`);
    dbRef.update({storage: url}).then(function() {
      console.log(`done setting up bucket for ${url}`);
    }).catch(function(err) {
      console.error(`db update for storage failed ${url}`);
      console.error(err);
    });
    bucket.makePublic().then(function() {
      console.log(`bucket set as public for ${url}`);
    }).catch(function(err) {
      console.error(`setting public for storage failed ${url}`);
      console.error(err);
    });
  }).catch(function(err) {
    console.error(`creating bucket failed ${url}`);
    console.error(err);
  });
}
I'm thinking your function needs to return a promise so that all the other async work has time to complete before the function shuts down. As it's shown now, your function simply returns immediately without waiting for the work to complete.
I don't know the cloud APIs you're using very well, but I'd guess that you should make setupDNS() and setupStorage() return the promises from the async work they're doing, then return Promise.all() with those two promises, to let Cloud Functions know it should wait until all that work is complete before cleaning up the container that's running the function.
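A minimal sketch of that shape, keeping the question's helpers and event API (setupStorage would change the same way as setupDNS):
exports.createDeleteDNSAndStorage = functions.database.ref('/apps/{uid}/{appid}/name')
  .onWrite(event => {
    const {uid, appid} = event.params;
    const name = event.data.val();
    const dbRef = admin.database().ref(`/apps/${uid}/${appid}`);
    if (event.data.previous.exists() || !event.data.exists()) {
      return null;
    }
    const url = `${name}.${zoneName}.com`;
    // Returning the pending work keeps the container alive until it settles.
    return Promise.all([
      setupDNS({url, dbRef}),
      setupStorage({url, dbRef})
    ]);
  });

function setupDNS({url, dbRef}) {
  const cnameRecord = zone.record('cname', {
    name: `${url}.`,
    data: 'c.storage.googleapis.com.',
    ttl: 3000
  });
  // Return the chain instead of discarding it.
  return zone.addRecords(cnameRecord)
    .then(() => dbRef.update({dns: url}));
}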
