AWS Kendra SDK call will not return results - JavaScript

I have been following the AWS-Kendra react-search app example you can find here:
https://docs.aws.amazon.com/kendra/latest/dg/deploying.html
After importing the Kendra client with:
const kendra = require('aws-sdk/clients/kendra');
const kendraClient = new kendra({apiVersion: '2019-02-03', region: 'us-east-1'});
Any call on kendraClient to any of the Kendra operations returns null. I have been executing queries with:
const results = kendraClient.query({ IndexId: INDEX_ID, QueryText: queryText});
which returns a request object whose data and error fields are null.
I have calls to S3 that execute correctly in the same file, so I do not believe it is an authentication problem. If I had to guess, it's some issue with how I created the Kendra object and client; the usual
kendra = new AWS.Kendra();
doesn't work because Kendra is not part of the browser version of the SDK.

Are you trying to run the JS directly from the browser? Here is a sample Node.js snippet:
var kendra = require("aws-sdk/clients/kendra");
var kendraClient = new kendra({apiVersion: "2019-02-03", region: "us-west-2"});

exports.handler = function (event) {
    try {
        console.log("Starting....");
        var params = {
            IndexId: "<<Enter your indexId here>>",
            QueryText: "<<Enter your queryText here>>",
            QueryResultTypeFilter: "DOCUMENT",
            PageNumber: 1
        };
        var kendraResponse = kendraClient.query(params, function(err, data) {
            if (err) console.log(err, err.stack); // an error occurred
            else console.log("Kendra result is", data); // successful response
        });
        const response = {
            "dialogAction":
            {
                "fulfillmentState": "Fulfilled",
                "type": "Close",
                "message":
                {
                    "contentType": "PlainText"
                }
            }
        };
        return response;
    } catch (error) {
        console.log(error);
    }
};
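Also note that in SDK v2, query() by itself only builds an AWS.Request; the data and error fields stay empty until you either pass a callback (as above) or call .promise() and await the result. A minimal sketch of the promise form, with the index ID and query text left as placeholders:

const kendra = require("aws-sdk/clients/kendra");
const kendraClient = new kendra({ apiVersion: "2019-02-03", region: "us-east-1" });

async function searchKendra(indexId, queryText) {
    // .promise() actually sends the request and resolves with the query response
    const data = await kendraClient
        .query({ IndexId: indexId, QueryText: queryText })
        .promise();
    console.log("Kendra result is", data);
    return data;
}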

Related

How would I set the status of an agent from "Missed" to "Available" in Amazon Connect?

We are using Amazon Connect and we are running into an issue.
On the service desk we have a "standby shift", meaning the user in that queue/shift is the only one on call. However, if they miss a call, they are stuck in the "Missed" state within the Connect CCP. I already tried the custom CCP method, but that does not fit our requirements yet.
Our second option is a Lambda that gets executed on a missed call (right before the user disconnect) to clear the "Missed" state and set it to "Available". This is what I tried:
var AWS = require('aws-sdk');
exports.handler = async (event) => {
    var connect = new AWS.Connect();
    let agentARN = event['Details']['Parameters']['agent'];
    var params = {
        AgentStatusId: 'Available',
        InstanceId: 'arn:aws:connect:eu-central-1:ID:ARN',
        UserId: agentARN
    };
    let errors;
    connect.putUserStatus(params, function(err, data) {
        if (err) errors = err.stack; // an error occurred
        else errors = data; // successful response
    });
    const response = {
        statusCode: 200,
        body: errors
    };
    return response;
};
Any ideas?
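One detail that may matter regardless of the approach: putUserStatus is asynchronous, so in the handler above the response object is built before the callback has run, and AgentStatusId expects the identifier of the status rather than its display name. A minimal sketch using the promise form (the status ID and instance ID below are placeholders, not values from my setup):

const AWS = require('aws-sdk');
const connect = new AWS.Connect();

exports.handler = async (event) => {
    const params = {
        AgentStatusId: '<agent-status-id>', // identifier of the "Available" status, e.g. from ListAgentStatuses
        InstanceId: '<connect-instance-id>',
        UserId: event['Details']['Parameters']['agent']
    };
    // Await the promise form so the handler does not return before the call completes
    const data = await connect.putUserStatus(params).promise();
    return { statusCode: 200, body: JSON.stringify(data) };
};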

How do I use JavaScript to call the AWS Textract service to upload a local photo for identification (without S3)

I want to call the AWS Textract service from JavaScript to identify the numbers in a local photo (without S3), and I get an error in client.send(command):
TypeError: Cannot read property 'byteLength' of undefined
I tried to find the correct sample in the AWS SDK for JavaScript v3 official documentation but couldn't find it.
I want to know how to modify the code to call this service.
This is my code:
const {
    TextractClient,
    AnalyzeDocumentCommand
} = require("@aws-sdk/client-textract");
// Set the AWS region
const REGION = "us-east-2"; // The AWS Region. For example, "us-east-1".
var fs = require("fs");

var res;
var imagedata = fs.readFileSync('./1.png');
res = imagedata.toString('base64');
console.log("res2");
console.log(typeof(res));
// console.log(res)

const client = new TextractClient({ region: REGION });
const params = {
    Document: {
        Bytes: res
    }
};
console.log("params");
console.log(typeof(params));
// console.log(params)

const command = new AnalyzeDocumentCommand(params);
console.log("command");
console.log(typeof(command));

const run = async () => {
    // async/await.
    try {
        const data = await client.send(command);
        console.log(data);
        // process data.
    } catch (error) {
        console.log("Error");
        console.log(error);
        // error handling.
    } finally {
        // finally.
    }
};
run();
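For reference, here is how the same call is often structured with the v3 SDK, passing the file contents as a Buffer/Uint8Array for Document.Bytes instead of a base64 string, and including FeatureTypes, which AnalyzeDocument requires. This is a sketch rather than a confirmed fix; that particular byteLength TypeError is also frequently reported when the v3 client cannot resolve credentials, so the credential chain is worth checking too.

const { TextractClient, AnalyzeDocumentCommand } = require("@aws-sdk/client-textract");
const fs = require("fs");

const client = new TextractClient({ region: "us-east-2" });

const run = async () => {
    try {
        // Pass the raw bytes; the v3 client takes a Uint8Array/Buffer for Document.Bytes
        const imageBytes = fs.readFileSync("./1.png");
        const command = new AnalyzeDocumentCommand({
            Document: { Bytes: imageBytes },
            FeatureTypes: ["FORMS"] // AnalyzeDocument requires at least one feature type
        });
        const data = await client.send(command);
        console.log(data);
    } catch (error) {
        console.log("Error", error);
    }
};
run();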

Is there any error in my AWS Lambda function?

Can anyone tell me what's wrong with the code below?
I am getting a timeout error for this function; even though I increase the timeout in the AWS basic settings, it still times out.
In decrypting.js I decrypt the value and use it in index.js with await decryptSecret("S3_SECRET").
Is this the proper way?
Can anyone help me with the best solution for this issue?
index.js
const aws = require('aws-sdk');
require('dotenv').config();
const path = require("path");
const fs = require("fs");
const { decryptSecret } = require('decrypting.js');

exports.handler = function () {
    try {
        const directoryPath = path.resolve(__dirname, process.env.LocalPath);
        fs.readdir(directoryPath, async function (error, files) {
            if (error) {
                console.log("Error getting directory information");
            } else {
                console.log("Loading lambda Function...");
                let cloudStorageFiles = [];
                aws.config.setPromisesDependency();
                aws.config.update({
                    accessKeyId: process.env.S3_ACCESS_KEY,
                    secretAccessKey: await decryptSecret("S3_SECRET"),
                    // process.env.S3_SECRET,
                    region: process.env.S3_REGION
                });
                const s3 = new aws.S3();
                const response = await s3.listObjectsV2({
                    Bucket: 'xxxxx',
                    Prefix: 'xxxxx'
                }, function (err, data) {
                    if (err) {
                        console.log(err, err.stack);
                    } else {
                        var contents = data.Contents;
                        contents.forEach((content) => {
                            cloudStorageFiles.push(content.Key);
                        });
                    }
                }).promise();
                console.log('First-Cloud-File-Storage:', cloudStorageFiles);
                // return cloudStorageFiles
            }
            console.log("Lambda function ended");
        });
        // return `Lambda function successfully completed`
    } catch (error) {
        console.log("Ooops...Error!", error);
    }
};
decrypting.js
const aws = require('aws-sdk');
aws.config.update({ region: 'us-east-1' });
const kms = new aws.KMS();
const decrypted = {};

exports.decryptSecret = async function (secretName) {
    if (decrypted[secretName]) {
        console.log('returning cached secret-name:' + secretName);
        return decrypted[secretName];
    }
    console.log('decrypting:' + secretName);
    try {
        const req = { CiphertextBlob: Buffer.from(process.env[secretName], 'base64') };
        const data = await kms.decrypt(req).promise();
        const decryptedVal = data.Plaintext.toString('ascii');
        decrypted[secretName] = decryptedVal;
        console.log('decryptedVal:', decryptedVal);
        return decryptedVal;
    } catch (error) {
        console.log('decrypt error:', error);
        throw error;
    }
};
Error Message:
{
"errorMessage": "2021-02-10T06:48:52.723Z 5dec4413-f8db-49bd-8075-661ccf6ef1a4 Task timed out after 50.02 seconds"
}
Logged output:
INFO Loading lambda Function...
INFO decryptingS3_SECRET
Your function is timing out because it does not have access to the internet. Since it is running inside a VPC, it must be placed in a private subnet to have outbound internet access; this applies to reaching S3 as well.
A private subnet in this case is a subnet where the default route (0.0.0.0/0) points to a NAT gateway and not an internet gateway.
Your function times out because a Lambda function associated with a VPC has no internet access by default. From the docs:
When you connect a function to a VPC in your account, the function can't access the internet unless your VPC provides access.
Consequently, your function can't connect to the public endpoints of S3 and KMS. To rectify this, there are two options:
Place your function in a private subnet (a public subnet will not work), set up a NAT gateway in a public subnet, and configure route tables so that your function can reach the internet through the NAT. The process is explained here.
Set up VPC endpoints for KMS and S3. This allows your function to access these services privately without needing internet access (a sketch follows below).
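As a rough sketch of the second option (normally done once through the console or infrastructure-as-code rather than in application code; the VPC, route table, subnet, and security group IDs below are placeholders):

const AWS = require('aws-sdk');
const ec2 = new AWS.EC2({ region: 'us-east-1' });

async function createEndpoints() {
    // Gateway endpoint for S3, attached to the route table(s) used by the Lambda's subnets
    await ec2.createVpcEndpoint({
        VpcId: 'vpc-xxxxxxxx',
        ServiceName: 'com.amazonaws.us-east-1.s3',
        VpcEndpointType: 'Gateway',
        RouteTableIds: ['rtb-xxxxxxxx']
    }).promise();

    // Interface endpoint for KMS, placed in the Lambda's subnets
    await ec2.createVpcEndpoint({
        VpcId: 'vpc-xxxxxxxx',
        ServiceName: 'com.amazonaws.us-east-1.kms',
        VpcEndpointType: 'Interface',
        SubnetIds: ['subnet-xxxxxxxx'],
        SecurityGroupIds: ['sg-xxxxxxxx'],
        PrivateDnsEnabled: true
    }).promise();
}

createEndpoints().catch(console.error);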

Making a distinction between file not present and access denied while accessing an S3 object via JavaScript

I have inherited the following code. It is part of a CI/CD pipeline. It tries to get an object called "changes" from a bucket and does something with it. If it is able to grab the object, it sends a success message back to the pipeline. If it fails to grab the file for whatever reason, it sends a failure message back to CodePipeline.
This "changes" file is made in a previous step of the pipeline. However, sometimes it is valid for this file NOT to exist (i.e. when there IS no change).
Currently, the following code makes no distinction between the file simply not existing and the code failing to get it for some other reason (access denied, etc.).
Desired:
I would like to send a success message back to CodePipeline if the file is simply not there.
If there is an access issue, then the current outcome of "failure" would still be valid.
Any help is greatly appreciated. Unfortunately I am not good enough with JavaScript to have any ideas to try.
RELEVANT PARTS OF THE CODE
const AWS = require("aws-sdk");
const s3 = new AWS.S3();
const lambda = new AWS.Lambda();
const codePipeline = new AWS.CodePipeline();

// GET THESE FROM ENV Variables
const {
    API_SOURCE_S3_BUCKET: s3Bucket,
    ENV: env
} = process.env;

const jobSuccess = (CodePipeline, params) => {
    return new Promise((resolve, reject) => {
        CodePipeline.putJobSuccessResult(params, (err, data) => {
            if (err) { reject(err); }
            else { resolve(data); }
        });
    });
};

const jobFailure = (CodePipeline, params) => {
    return new Promise((resolve, reject) => {
        CodePipeline.putJobFailureResult(params, (err, data) => {
            if (err) { reject(err); }
            else { resolve(data); }
        });
    });
};

// MAIN CALLER FUNCTION. STARTING POINT
exports.handler = async (event, context, callback) => {
    try {
        // WHAT IS IN changes file in S3
        let changesFile = await getObject(s3, s3Bucket, `lambda/${version}/changes`);
        let changes = changesFile.trim().split("\n");
        console.log("List of Changes");
        console.log(changes);
        let params = { jobId };
        let jobSuccessResponse = await jobSuccess(codePipeline, params);
        context.succeed("Job Success");
    }
    catch (exception) {
        let message = "Job Failure (General)";
        let failureParams = {
            jobId,
            failureDetails: {
                message: JSON.stringify(message),
                type: "JobFailed",
                externalExecutionId: context.invokeid
            }
        };
        let jobFailureResponse = await jobFailure(codePipeline, failureParams);
        console.log(message, exception);
        context.fail(`${message}: ${exception}`);
    }
};
S3 should return an error code in the exception:
The ones you care about are below:
AccessDenied - Access Denied
NoSuchKey - The specified key does not exist.
So in your catch block you should be able to check exception.code to see whether it matches one of these two.
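A minimal sketch of that catch block, assuming a missing file should count as success and reusing the jobSuccess/jobFailure helpers from the question (note that if the Lambda role lacks s3:ListBucket on the bucket, S3 reports a missing key as AccessDenied rather than NoSuchKey):

catch (exception) {
    // NoSuchKey: the "changes" object simply does not exist, which is a valid state
    if (exception.code === "NoSuchKey") {
        await jobSuccess(codePipeline, { jobId });
        context.succeed("Job Success (no changes file)");
        return;
    }
    // Anything else (e.g. AccessDenied) keeps the existing failure behaviour
    const message = "Job Failure (General)";
    const failureParams = {
        jobId,
        failureDetails: {
            message: JSON.stringify(message),
            type: "JobFailed",
            externalExecutionId: context.invokeid
        }
    };
    await jobFailure(codePipeline, failureParams);
    console.log(message, exception);
    context.fail(`${message}: ${exception}`);
}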

TypeError: AWS.rekognition is not a constructor

const AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials.json');
AWS.config.update({region: 'us-east-1'});

var rekognition = new AWS.rekognition();

var params = {
    CollectionId: "sammple",
    DetectionAttributes: [
    ],
    ExternalImageId: "facialrekogntition", //TODo
    Image: {
        S3Object: {
            Bucket: "facerekognition12",
            Name: "download.jpg"
        }
    }
};

rekognition.indexFaces(params, function(err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log(data); // successful response
});
Whenever I use node index.js, I am getting the error mentioned in the title.
Note - I have my credentials stored in the JSON file and have also installed the AWS SDK for Node.
It's new AWS.Rekognition(); basically, a capital R.
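For completeness, the corrected constructor call (the rest of the snippet is unchanged):

const AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials.json');
AWS.config.update({ region: 'us-east-1' });

// Service clients are capitalized constructors on the AWS namespace
const rekognition = new AWS.Rekognition();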
