How do I use JavaScript to call the AWS Textract service on a local photo (without S3)? - javascript

I want to call the AWS Textract service from JavaScript to identify the numbers in a local photo (without S3), and I get this error:
TypeError: Cannot read property 'byteLength' of undefined, thrown from client.send(command).
I tried to find a matching example in the official AWS SDK for JavaScript v3 documentation but couldn't find one.
How do I modify the code to call this service?
This is my code:
const {
  TextractClient,
  AnalyzeDocumentCommand
} = require("@aws-sdk/client-textract");

// Set the AWS region
const REGION = "us-east-2"; // The AWS Region. For example, "us-east-1".

var fs = require("fs");
var res;
var imagedata = fs.readFileSync('./1.png');
res = imagedata.toString('base64');
console.log("res2");
console.log(typeof (res));
// console.log(res)

const client = new TextractClient({ region: REGION });
const params = {
  Document: {
    Bytes: res
  }
};
console.log("params");
console.log(typeof (params));
// console.log(params)

const command = new AnalyzeDocumentCommand(params);
console.log("command");
console.log(typeof (command));

const run = async () => {
  // async/await.
  try {
    const data = await client.send(command);
    console.log(data);
    // process data.
  } catch (error) {
    console.log("Error");
    console.log(error);
    // error handling.
  } finally {
    // finally.
  }
};
run();
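In the v3 SDK, Document.Bytes is binary data (a Uint8Array/Buffer), and passing a base64 string is what typically produces the byteLength error. Below is a minimal, hedged sketch of the likely fix, passing the file buffer through unchanged; the file name and region are the ones from the question, and FeatureTypes is required by AnalyzeDocument.

const { TextractClient, AnalyzeDocumentCommand } = require("@aws-sdk/client-textract");
const fs = require("fs");

const client = new TextractClient({ region: "us-east-2" });

const run = async () => {
  // Read the image as a Buffer and hand it to Textract directly,
  // without converting it to a base64 string.
  const imageBytes = fs.readFileSync("./1.png");
  const command = new AnalyzeDocumentCommand({
    Document: { Bytes: imageBytes },
    FeatureTypes: ["FORMS", "TABLES"] // AnalyzeDocument requires FeatureTypes
  });
  try {
    const data = await client.send(command);
    console.log(JSON.stringify(data.Blocks, null, 2));
  } catch (error) {
    console.error(error);
  }
};

run();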

Related

Is there any error in my AWS Lambda function?

Can anyone tell me what's wrong with the code below?
I am getting a timeout error for this function; even though I increase the timeout in the AWS basic settings, it behaves the same.
In decrypting.js I decrypt the value and use it in index.js via await decryptSecret("S3_SECRET").
Is this the proper way?
Can anyone help me with the best solution for this issue?
index.js
const aws = require('aws-sdk');
require('dotenv').config();
const path = require("path");
const fs = require("fs");
const { decryptSecret } = require('decrypting.js');

exports.handler = function () {
  try {
    const directoryPath = path.resolve(__dirname, process.env.LocalPath);
    fs.readdir(directoryPath, async function (error, files) {
      if (error) {
        console.log("Error getting directory information");
      } else {
        console.log("Loading lambda Function...");
        let cloudStorageFiles = [];
        aws.config.setPromisesDependency();
        aws.config.update({
          accessKeyId: process.env.S3_ACCESS_KEY,
          secretAccessKey: await decryptSecret("S3_SECRET"),
          // process.env.S3_SECRET,
          region: process.env.S3_REGION
        });
        const s3 = new aws.S3();
        const response = await s3.listObjectsV2({
          Bucket: 'xxxxx',
          Prefix: 'xxxxx'
        }, function (err, data) {
          if (err) {
            console.log(err, err.stack);
          } else {
            var contents = data.Contents;
            contents.forEach((content) => {
              cloudStorageFiles.push(content.Key);
            });
          }
        }).promise();
        console.log('First-Cloud-File-Storage:', cloudStorageFiles);
        // return cloudStorageFiles
      }
      console.log("Lambda function ended");
    });
    // return `Lambda function successfully completed`
  } catch (error) {
    console.log("Ooops...Error!", error);
  }
};
decrypting.js
const aws = require('aws-sdk');
aws.config.update({ region: 'us-east-1' });
const kms = new aws.KMS();
const decrypted = {};

exports.decryptSecret = async function (secretName) {
  if (decrypted[secretName]) {
    console.log('returning cached secret-name:' + secretName);
    return decrypted[secretName];
  }
  console.log('decrypting:' + secretName);
  try {
    const req = { CiphertextBlob: Buffer.from(process.env[secretName], 'base64') };
    const data = await kms.decrypt(req).promise();
    const decryptedVal = data.Plaintext.toString('ascii');
    decrypted[secretName] = decryptedVal;
    console.log('decryptedVal:', decryptedVal);
    return decryptedVal;
  } catch (error) {
    console.log('decrypt error:', error);
    throw error;
  }
};
Error Message:
{
  "errorMessage": "2021-02-10T06:48:52.723Z 5dec4413-f8db-49bd-8075-661ccf6ef1a4 Task timed out after 50.02 seconds"
}
Logged output:
INFO Loading lambda Function...
INFO decrypting:S3_SECRET
Your function is timing out because it does not have access to the internet. Since it is running inside a VPC, it must be placed in a private subnet to have outbound internet access. This includes S3.
A private subnet in this case is a subnet where the default route (0.0.0.0/0) points to a NAT gateway and not an internet gateway.
Your function times out because a Lambda function associated with a VPC has no internet access by default. From the docs:
When you connect a function to a VPC in your account, the function can't access the internet unless your VPC provides access.
Subsequently, your function can't connect to the public endpoints of S3 and KMS. To rectify this, there are two options:
Place your function in a private subnet (a public subnet will not work), set up a NAT gateway in a public subnet, and configure route tables so that your function can reach the internet through the NAT. The process is explained here.
Set up VPC endpoints for KMS and S3. This allows your function to access these services privately, without internet access (a sketch follows below).
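For illustration only, here is a hedged sketch of the second option using the v2 SDK's EC2 API. All of the IDs and the region are placeholders, and in practice this is usually done in the console, the CLI, or infrastructure-as-code rather than from application code.

const aws = require('aws-sdk');
const ec2 = new aws.EC2({ region: 'us-east-1' }); // placeholder region

async function createEndpoints() {
  // Gateway endpoint for S3, attached to the route table used by the Lambda's subnets.
  await ec2.createVpcEndpoint({
    VpcId: 'vpc-00000000',                      // placeholder
    ServiceName: 'com.amazonaws.us-east-1.s3',
    VpcEndpointType: 'Gateway',
    RouteTableIds: ['rtb-00000000']             // placeholder
  }).promise();

  // Interface endpoint for KMS, placed in the Lambda's subnets behind a security group.
  await ec2.createVpcEndpoint({
    VpcId: 'vpc-00000000',                      // placeholder
    ServiceName: 'com.amazonaws.us-east-1.kms',
    VpcEndpointType: 'Interface',
    SubnetIds: ['subnet-00000000'],             // placeholder
    SecurityGroupIds: ['sg-00000000'],          // placeholder
    PrivateDnsEnabled: true
  }).promise();
}

createEndpoints().catch(console.error);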

Making a distinction between file not present and access denied while accessing an S3 object via JavaScript

I have inherited the following code. It is part of a CI/CD pipeline. It tries to get an object called "changes" from a bucket and does something with it. If it is able to grab the object, it sends a success message back to the pipeline. If it fails to grab the file for whatever reason, it sends a failure message back to CodePipeline.
This "changes" file is made in a previous step of the pipeline. However, sometimes it is valid for this file NOT to exist (i.e. when there IS no change).
Currently, the following code makes no distinction between the file simply not existing and the code failing to get it for some other reason (access denied, etc.).
Desired:
I would like to send a success message back to CodePipeline if the file is simply not there.
If there is an access issue, then the current outcome of "failure" would still be valid.
Any help is greatly appreciated. Unfortunately I am not good enough with JavaScript to have any ideas to try.
RELEVANT PARTS OF THE CODE
const AWS = require("aws-sdk");
const s3 = new AWS.S3();
const lambda = new AWS.Lambda();
const codePipeline = new AWS.CodePipeline();
// GET THESE FROM ENV Variables
const {
API_SOURCE_S3_BUCKET: s3Bucket,
ENV: env
} = process.env;
const jobSuccess = (CodePipeline, params) => {
return new Promise((resolve, reject) => {
CodePipeline.putJobSuccessResult(params, (err, data) => {
if (err) { reject(err); }
else { resolve(data); }
});
});
};
const jobFailure = (CodePipeline, params) => {
return new Promise((resolve, reject) => {
CodePipeline.putJobFailureResult(params, (err, data) => {
if (err) { reject(err); }
else { resolve(data); }
});
});
};
// MAIN CALLER FUNCTION. STARTING POINT
exports.handler = async (event, context, callback) => {
try {
// WHAT IS IN changes file in S3
let changesFile = await getObject(s3, s3Bucket, `lambda/${version}/changes`);
let changes = changesFile.trim().split("\n");
console.log("List of Changes");
console.log(changes);
let params = { jobId };
let jobSuccessResponse = await jobSuccess(codePipeline, params);
context.succeed("Job Success");
}
catch (exception) {
let message = "Job Failure (General)";
let failureParams = {
jobId,
failureDetails: {
message: JSON.stringify(message),
type: "JobFailed",
externalExecutionId: context.invokeid
}
};
let jobFailureResponse = await jobFailure(codePipeline, failureParams);
console.log(message, exception);
context.fail(`${message}: ${exception}`);
}
};
S3 should return an error code in the exception. The ones you care about are below:
AccessDenied - Access Denied
NoSuchKey - The specified key does not exist.
So in your catch block you should be able to check exception.code to see whether it matches one of these two, as sketched below.
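Here is a hedged sketch of how the catch block in the handler above might distinguish the two cases; jobSuccess, jobFailure, codePipeline, jobId, and context are assumed to be in scope exactly as in the question's code.

catch (exception) {
  // With the v2 SDK, S3 surfaces the error code on exception.code.
  if (exception.code === "NoSuchKey") {
    // The "changes" file simply does not exist: treat this as a legitimate no-op.
    await jobSuccess(codePipeline, { jobId });
    context.succeed("Job Success (no changes file)");
    return;
  }
  // Anything else (AccessDenied, throttling, ...) remains a real failure.
  let message = exception.code === "AccessDenied"
    ? "Job Failure (Access Denied)"
    : "Job Failure (General)";
  let failureParams = {
    jobId,
    failureDetails: {
      message: JSON.stringify(message),
      type: "JobFailed",
      externalExecutionId: context.invokeid
    }
  };
  await jobFailure(codePipeline, failureParams);
  context.fail(`${message}: ${exception}`);
}

One caveat: if the Lambda role lacks s3:ListBucket on the bucket, S3 masks a missing key as AccessDenied rather than NoSuchKey, so the distinction only works when the role can list the bucket.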

AWS Kendra SDK call will not return results

I have been following the AWS-Kendra react-search app example you can find here:
https://docs.aws.amazon.com/kendra/latest/dg/deploying.html
After importing the Kendra client with:
const kendra = require('aws-sdk/clients/kendra');
const kendraClient = new kendra({apiVersion: '2019-02-03', region: 'us-east-1'});
Any call on kendraClient to any of the Kendra services returns null. I have been executing queries with:
const results = kendraClient.query({ IndexId: INDEX_ID, QueryText: queryText});
which returns a request object with null data and error fields.
I have calls to S3 that execute correctly in the same file, so I do not believe it to be an authentication problem. If I had to guess, it's some issue with how I created the Kendra object and client; the usual
kendra = new AWS.Kendra();
doesn't work because Kendra is not part of the browser version of the SDK.
Are you trying to run JS from the browser directly? Here is a sample Node.js snippet:
var kendra = require("aws-sdk/clients/kendra");
var kendraClient = new kendra({ apiVersion: "2019-02-03", region: "us-west-2" });

exports.handler = function (event) {
  try {
    console.log("Starting....");
    var params = {
      IndexId: "<<Enter your indexId here>>",
      QueryText: "<<Enter your queryText here>>",
      QueryResultTypeFilter: "DOCUMENT",
      PageNumber: 1
    };
    var kendraResponse = kendraClient.query(params, function (err, data) {
      if (err) console.log(err, err.stack); // an error occurred
      else console.log("Kendra result is", data); // successful response
    });
    const response = {
      "dialogAction": {
        "fulfillmentState": "Fulfilled",
        "type": "Close",
        "message": {
          "contentType": "PlainText"
        }
      }
    };
    return response;
  } catch (error) {
    console.log(error);
  }
};
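The null data and error fields seen in the question are what you get when you inspect the AWS.Request object that query() returns before the call completes; the actual results arrive in the callback above or via .promise(). A minimal, hedged promise-based sketch, with the index ID and query text as placeholders:

const kendra = require("aws-sdk/clients/kendra");
const kendraClient = new kendra({ apiVersion: "2019-02-03", region: "us-east-1" });

async function runQuery(queryText) {
  // Awaiting .promise() resolves with the query response itself
  // rather than the AWS.Request wrapper.
  const data = await kendraClient
    .query({ IndexId: "<<Enter your indexId here>>", QueryText: queryText })
    .promise();
  console.log("Kendra result is", data.ResultItems);
  return data;
}

runQuery("<<Enter your queryText here>>").catch(console.error);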

Is there any way to set an AWS endpoint globally?

I'd like to set the S3 endpoint globally, once, rather than setting it each time I create a new AWS.S3() client. Is this possible?
This is different from the global region setting. I'm looking to set a per-service endpoint globally.
Create a file named aws.js and, inside that file, create all the functions you want to use for your S3 bucket, i.e. for uploading or deleting content.
Then just export that file and use it anywhere you want.
E.g., I'm using SQS in my case:
const AWS = require("aws-sdk");
AWS.config.update({ region: "us-east-1" });
// Create an SQS service object
const sqs = new AWS.SQS({ apiVersion: "2012-11-05", "accessKeyId": process.env.ACCESS_KEY_ID, "secretAccessKey": process.env.SECRET_ACCESS_KEY });
const QueueUrl = process.env.SQS_QUEUE_URL;
const sendPayloadToSQS = async message => {
const params = {
MessageBody: JSON.stringify(message),
QueueUrl
};
const request = sqs.sendMessage(params);
const result = await request.promise();
if (result) {
console.log(`Message queued to SQS successfully : ${result.MessageId}`);
} else {
console.log("Message queued failed");
}
};
module.exports = sendPayloadToSQS;
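A short usage sketch of the pattern described above; the file name ./aws and the payload shape are assumptions for illustration.

// somewhere-else.js
const sendPayloadToSQS = require("./aws");

async function main() {
  // The wrapper module owns the SQS configuration, so callers only pass the payload.
  await sendPayloadToSQS({ orderId: 123, status: "created" });
}

main().catch(console.error);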
What is your aws-sdk version?
const AWS = require("aws-sdk");
console.log(new AWS.S3().config.endpoint); // s3.amazonaws.com
AWS.config.s3 = { endpoint: "https://my-endpoint" }; // make sure this line runs before any clients are created
console.log(new AWS.S3().config.endpoint); // https://my-endpoint

ECONNRESET socket hang up

I have a function that triggers on Firebase database onWrite. The function body uses two Google Cloud APIs (DNS and Storage).
While the function runs and mostly works as expected, the issue is that the socket hangs up more often than I'd like (roughly 50% of the time).
My questions are:
Is this similar to what other testers have experienced? Is it a well-known outstanding issue, or expected behavior?
The example code is as follows:
const functions = require('firebase-functions');
const admin = require('firebase-admin');

const {credentials} = functions.config().auth;
credentials.private_key = credentials.private_key.replace(/\\n/g, '\n');
const config = Object.assign({}, functions.config().firebase, {credentials});
admin.initializeApp(config);

const gcs = require('@google-cloud/storage')({credentials});
const dns = require('@google-cloud/dns')({credentials});
const zoneName = 'applambda';
const zone = dns.zone(zoneName);

exports.createDeleteDNSAndStorage = functions.database.ref('/apps/{uid}/{appid}/name')
  .onWrite(event => {
    // Only edit data when it is first created.
    const {uid, appid} = event.params;
    const name = event.data.val();
    const dbRef = admin.database().ref(`/apps/${uid}/${appid}`);
    if (event.data.previous.exists()) {
      console.log(`already exists ${uid}/${appid}`);
      return;
    }
    // Exit when the data is deleted.
    if (!event.data.exists()) {
      console.log(`data is being deleted ${uid}/${appid}`);
      return;
    }
    const url = `${name}.${zoneName}.com`;
    console.log(`data: ${uid}/${appid}/${name}\nsetting up: ${url}`);
    setupDNS({url, dbRef});
    setupStorage({url, dbRef});
    return;
  });

function setupDNS({url, dbRef}) {
  // Create an NS record.
  let cnameRecord = zone.record('cname', {
    name: `${url}.`,
    data: 'c.storage.googleapis.com.',
    ttl: 3000
  });
  zone.addRecords(cnameRecord).then(function() {
    console.log(`done setting up zonerecord for ${url}`);
    dbRef.update({dns: url}).then(res => console.log(res)).catch(err => console.log(err));
  }).catch(function(err) {
    console.error(`error setting up zonerecord for ${url}`);
    console.error(err);
  });
}

function setupStorage({url, dbRef}) {
  console.log(`setting up storage bucket for ${url}`);
  gcs.createBucket(url, {
    website: {
      mainPageSuffix: `https://${url}`,
      notFoundPage: `https://${url}/404.html`
    }
  }).then(function(res) {
    let bucket = res[0];
    console.log(`created bucket ${url}, setting it as public`);
    dbRef.update({storage: url}).then(function() {
      console.log(`done setting up bucket for ${url}`);
    }).catch(function(err) {
      console.error(`db update for storage failed ${url}`);
      console.error(err);
    });
    bucket.makePublic().then(function() {
      console.log(`bucket set as public for ${url}`);
    }).catch(function(err) {
      console.error(`setting public for storage failed ${url}`);
      console.error(err);
    });
  }).catch(function(err) {
    console.error(`creating bucket failed ${url}`);
    console.error(err);
  });
}
I'm thinking your function needs to return a promise so that all the other async work has time to complete before the function shuts down. As shown now, your function simply returns immediately without waiting for the work to complete.
I don't know the cloud APIs you're using very well, but I'd guess that you should make setupDNS() and setupStorage() return the promises from the async work they're doing, then return Promise.all() passing those two promises, to let Cloud Functions know it should wait until all that work is complete before cleaning up the container that's running the function.
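A hedged sketch of what that could look like, reusing the names from the question; the early-exit checks are elided and essentially only the return statements change, so treat it as an outline rather than a drop-in replacement.

exports.createDeleteDNSAndStorage = functions.database.ref('/apps/{uid}/{appid}/name')
  .onWrite(event => {
    const {uid, appid} = event.params;
    const name = event.data.val();
    const dbRef = admin.database().ref(`/apps/${uid}/${appid}`);
    // ... same "already exists" / "being deleted" checks as in the question ...
    const url = `${name}.${zoneName}.com`;
    // Return a promise so Cloud Functions waits for both setups to finish.
    return Promise.all([
      setupDNS({url, dbRef}),
      setupStorage({url, dbRef})
    ]);
  });

function setupDNS({url, dbRef}) {
  const cnameRecord = zone.record('cname', {
    name: `${url}.`,
    data: 'c.storage.googleapis.com.',
    ttl: 3000
  });
  // Returning the chain lets the caller wait for the DNS record and the DB update.
  return zone.addRecords(cnameRecord)
    .then(() => dbRef.update({dns: url}));
}

function setupStorage({url, dbRef}) {
  // Returning the chain lets the caller wait for bucket creation and follow-up work.
  return gcs.createBucket(url, {
    website: {
      mainPageSuffix: `https://${url}`,
      notFoundPage: `https://${url}/404.html`
    }
  }).then(res => {
    const bucket = res[0];
    return Promise.all([
      dbRef.update({storage: url}),
      bucket.makePublic()
    ]);
  });
}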
