MalformedXML: The XML you provided was not well-formed or did not validate against our published schema - javascript

I am having this weird issue while working with AWS S3. I am working on an application that stores images in an AWS S3 bucket, using Multer as middleware and the S3FS library to connect and upload to AWS.
But the following error pops up when I try uploading the content:
"MalformedXML: The XML you provided was not well-formed or did not validate against our published schema"
Index.js
var express = require('express');
var router = express();
var multer = require('multer');
var fs = require('fs');
var S3FS = require('s3fs');

var upload = multer({
    dest: 'uploads'
});

var S3fsImpl = new S3FS('bucket-name', {
    region: 'us-east-1',
    accessKeyId: 'XXXXXXXXXXXX',
    secretAccessKey: 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
});

/* GET home page. */
router.get('/', function (req, res, next) {
    res.render('profile', {
        title: 'Express'
    });
});

router.post('/testupload', upload.single('file'), function (req, res) {
    var file = req.file;
    console.log(file);

    var path = req.file.path;
    var stream = fs.createReadStream(path);
    console.log(stream);

    S3fsImpl.writeFile(file.name, stream).then(function () {
        fs.unlink(file.path, function (err) {
            if (err) {
                console.log(err);
            }
        });
        res.redirect('/profile');
    });
});

module.exports = router;
EDIT
Output:
{ fieldname: 'file',
originalname: '441_1.docx',
encoding: '7bit',
mimetype: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
destination: 'uploads',
filename: '662dcbe544804e4f50dfef1f52b40d22',
path: 'uploads\\662dcbe544804e4f50dfef1f52b40d22',
size: 13938 }
ReadStream {
_readableState:
ReadableState {
objectMode: false,
highWaterMark: 65536,
buffer: BufferList { head: null, tail: null, length: 0 },
length: 0,
pipes: null,
pipesCount: 0,
flowing: null,
ended: false,
endEmitted: false,
reading: false,
sync: true,
needReadable: false,
emittedReadable: false,
readableListening: false,
resumeScheduled: false,
defaultEncoding: 'utf8',
ranOut: false,
awaitDrain: 0,
readingMore: false,
decoder: null,
encoding: null },
readable: true,
domain: null,
_events: { end: [Function] },
_eventsCount: 1,
_maxListeners: undefined,
path: 'uploads\\662dcbe544804e4f50dfef1f52b40d22',
fd: null,
flags: 'r',
mode: 438,
start: undefined,
end: undefined,
autoClose: true,
pos: undefined,
bytesRead: 0 }
Package.json
{
  "name": "aws-s3-images",
  "version": "1.0.0",
  "private": true,
  "scripts": {
    "start": "node ./bin/www"
  },
  "dependencies": {
    "body-parser": "~1.17.1",
    "connect-multiparty": "^2.0.0",
    "cookie-parser": "~1.4.3",
    "debug": "~2.6.3",
    "express": "~4.15.2",
    "hbs": "~4.0.1",
    "morgan": "~1.8.1",
    "multer": "^1.3.0",
    "s3fs": "^2.5.0",
    "serve-favicon": "~2.4.2"
  },
  "description": "AWS S3 uploading images",
  "main": "app.js",
  "devDependencies": {},
  "keywords": [
    "javascript"
  ],
  "author": "reeversedev",
  "license": "MIT"
}

S3 restricts deletions to 1000 keys per DeleteObjectsRequest. Hence, after fetching the full list of KeyVersions, I check whether there are more than 1000 keys; if so, I partition the list into sublists of 1000 and pass each sublist to its own DeleteObjectsRequest, as below:
if (keys.size() > 1000) {
    int count = 0;
    List<List> partition = ListUtils.partition(keys, 1000);
    for (List list : partition) {
        count = count + list.size();
        DeleteObjectsRequest request = new DeleteObjectsRequest(
                fileSystemConfiguration.getTrackingS3BucketName()).withKeys(list);
        amazonS3Client.deleteObjects(request);
        logger.info("Deleted the completed directory files " + list.size() + " from folder "
                + eventSpecificS3bucket);
    }
    logger.info("Deleted the total directory files " + count + " from folder " + eventSpecificS3bucket);
} else {
    DeleteObjectsRequest request = new DeleteObjectsRequest(
            fileSystemConfiguration.getTrackingS3BucketName()).withKeys(keys);
    amazonS3Client.deleteObjects(request);
    logger.info("Deleted the completed directory files from folder " + eventSpecificS3bucket);
}

I got this problem when using the AmplifyJS library. The AWS documentation on the multipart upload overview says:
Whenever you upload a part, Amazon S3 returns an ETag header in its
response. For each part upload, you must record the part number and
the ETag value. You need to include these values in the subsequent
request to complete the multipart upload.
But the default S3 configuration does not expose that header. Just go to the Permissions tab of the bucket and
add <ExposeHeader>ETag</ExposeHeader> to the CORS configuration.
https://github.com/aws-amplify/amplify-js/issues/61
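For reference, a bucket CORS configuration with the ETag header exposed might look roughly like this (the origin, methods and headers below are placeholders, adjust them to your own setup):
<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>*</AllowedOrigin>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>PUT</AllowedMethod>
    <AllowedMethod>POST</AllowedMethod>
    <AllowedHeader>*</AllowedHeader>
    <ExposeHeader>ETag</ExposeHeader>
  </CORSRule>
</CORSConfiguration>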

So if anyone is still facing this issue: in my case it only happens when you pass an empty array of objects to delete, which makes the request fail with the "MalformedXML" error.
const data: S3.DeleteObjectsRequest = {
  Bucket: bucketName,
  Delete: {
    Objects: [], // <<--- here
  },
}
return s3Bucket.deleteObjects(data).promise()
So just check that the array of object keys isn't empty before sending that request to AWS.
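A minimal guard might look like this sketch (keysToDelete is a placeholder name for whatever array of { Key } objects you build):
// Skip the request entirely when there is nothing to delete;
// an empty Objects list is rejected by S3 with MalformedXML.
if (keysToDelete.length === 0) {
  return Promise.resolve()
}
return s3Bucket.deleteObjects({
  Bucket: bucketName,
  Delete: { Objects: keysToDelete },
}).promise()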

As far as I know, just cross-check the BUCKET NAME:
final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, accessKeyId, is, meta);

If you use ActiveStorage with MinIO, add force_path_style: true to your config:
# config/storage.yml
minio:
  service: S3
  access_key_id: name
  secret_access_key: password
  endpoint: http://example.com:9000/
  region: us-east-1
  bucket: myapp-production
  force_path_style: true # add this

input := &s3.DeleteObjectsInput{
    Bucket: bucketName,
    Delete: &s3.Delete{
        Objects: objs, // <- up to 1000 keys
        Quiet:   aws.Bool(false),
    },
}
I am using the aws-sdk-go SDK; when the number of keys in objs is over 1000, I get the same error:
MalformedXML: The XML you provided was not well-formed or did not validate against our published schema.
The request can contain a list of up to 1000 keys.
Reference:
https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html

For those who came here from Talend: in my case, cross-check the tS3Put component's bucket name, and in the key part give whatever name you want the uploaded file to have in S3.
As I'm new to StackOverflow, I'm not allowed to attach images here. You can copy the URL below to check it out. Thanks
https://i.stack.imgur.com/Q1pW0.png

This code should work for you. You need to remember to:
1) use a unique bucket name
2) use 'originalname' instead of 'name' on your file object <-- the 'name' property does not exist
app.post('/testupload', function (req, res) {
    var file = req.files[0];
    console.log(file.path);
    console.log(file.name);
    console.log('FIRST TEST: ' + JSON.stringify(file));

    var stream = fs.createReadStream(file.path);

    S3fsImpl.writeFile(file.originalname, stream).then(
        function () {
            console.log('File has been sent - OK');
        },
        function (reason) {
            throw reason;
        }
    );

    res.redirect('/index');
});

Can you try this code:
var S3fsImpl = new S3FS('bucket-name', {
    region: 'us-east-1',
    accessKeyId: 'XXXXXXXXXXXX',
    secretAccessKey: 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
});

var fsImplStyles = S3fsImpl.getPath(file.name);

// Change us-east-1 to your region
var url = 'https://s3-us-east-1.amazonaws.com/' + fsImplStyles;
Send feedback if this code works for you.

Related

NuxtJs deployed with docker using traefik PathPrefix won't detect page

We have a blog in a NuxtJS project deployed using Docker and traefik's PathPrefix('/blog'), so instead of being served from https://example.com/ it is served from
https://example.com/blog/
version: "3.9"
services:
  {{ project_id }}-awesome-blog-{{ awesome_blog_env }}:
    # etc..
    labels:
      - traefik.http.routers.awesome_blog-{{ awesome_blog_env }}-router.rule=Host(`{{ awesome_blog_host_dns }}`) && PathPrefix(`/blog`)
      # etc..
      - {{ docker_network }}
networks:
  {{ docker_network }}:
    external: true
The issue is that, because of the PathPrefix, Nuxt does not seem to know the actual page: since it receives /blog/ as the path, it doesn't match any expected route.
This is what this.$router prints out on the server for https://example.com/blog/:
{
  "name": null,
  "meta": {},
  "path": "/blog/",
  "hash": "",
  "query": {},
  "params": {},
  "fullPath": "/blog/",
  "matched": []
}
And this is what it prints in local from http://localhost.com:3000/
{
  "name": "index___en",
  "meta": {},
  "path": "/",
  "hash": "",
  "query": {},
  "params": {},
  "fullPath": "/",
  "matched": [{
    // ...etc
    name: "index___en",
    // ..etc
  }]
}
I tried using the
router: {
  base: '/blog/'
}
which works locally, but on the server it seems not to work (it doesn't even reach the Nuxt site and prints the server 404).
As an "ugly" alternative I tried copying all the pages inside the /blog/ folder, and they do respond, but my serverMiddleware does not (it returns 404).
This serverMiddleware is set up like this:
nuxt.config.js
serverMiddleware: ['~/server-middleware/index.js'],
server-middleware/index.js
const nocache = require('nocache');
const express = require('express')
const buildRouter = require('../server/router.js')

const app = express()
app.use(express.json())
app.use(nocache());
buildRouter(app)

module.exports = app
server/router.js
module.exports = function (app) {
  const buildApiRoute = require('./controllers/api')
  buildApiRoute(app, '/api')
}
I tried with /blog/api, too:
module.exports = function (app) {
  const buildApiRoute = require('./controllers/api')
  buildApiRoute(app, '/blog/api')
}
Anyway, is there any way to tell Nuxt to ignore the /blog/ part of the URL, or something similar?
Magic code:
// Before anything
this.$router.fullPath = this.$router.fullPath.replace('/blog', '/')
this.$router.path = this.$router.path.replace('/blog', '/')
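If rewriting the router paths by hand feels too fragile, another option worth trying is to make the router base depend on the deployment, for example through an environment variable that only the Docker container sets (NUXT_ROUTER_BASE is an assumed name here, not something Nuxt defines):
// nuxt.config.js -- sketch only: serve under /blog/ when the variable is set
// in the deployed container, and under / when running locally.
export default {
  router: {
    base: process.env.NUXT_ROUTER_BASE || '/'
  },
  serverMiddleware: ['~/server-middleware/index.js']
}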

Cypress environment variable undefined

In my cypress.json file I have the following code:
{
  "baseUrl": "test",
  "ignoreTestFiles": [],
  "viewportHeight": 768,
  "viewportWidth": 1024,
  "video": false,
  "env": { "email": "test#email.com", "password": "password" }
}
When I try to access it by calling Cypress.env('password'), it shows undefined in the console log when printing it. What is the issue?
const password: string = Cypress.env('password')

describe("Login to the application", () => {
  beforeEach(() => {
    cy.visit("/");
  });

  it.only("user should login successfully", () => {
    console.log(Cypress.env('email')); // --- undefined
    loginPage.login(email, password);
    cy.url().should("include", "/wallet");
  });
});
My mistake was not knowing or not checking the location of my cypress.json file; I moved it to the top-level cypress folder and the value is shown properly.
In my project (version 10.x) the cypress.config.ts must be in the root path, not in the cypress folder. You can generate the config with the UI to get it in the right location:
Settings > Project settings > cypress.config.ts
UPDATE for CYPRESS V10.
Extending @Artjom Prozorov's answer:
Now in the newer version the cypress.json naming convention is deprecated.
So we have to use cypress.config.ts as the file name for configuration.
A sample of the file content is given below.
import { defineConfig } from "cypress";

export default defineConfig({
  e2e: {
    specPattern: "src/**/*.cy.{js,jsx,ts,tsx}",
    baseUrl: "http://localhost:3001",
    trashAssetsBeforeRuns: false,
    viewportWidth: 1920,
    viewportHeight: 1080,
    slowTestThreshold: 1000,
    // watchForFileChanges: false,
    env: {
      apiUrl: "http://localhost:3000",
      commandDelay: 100,
      password: 'here it is'
    },
    reporter: 'mochawesome',
    reporterOptions: {
      reportDir: 'cypress/reports',
      overwrite: false,
      html: true,
      json: false
    },
    setupNodeEvents(on, config) {
      config.env.sharedSecret =
        process.env.NODE_ENV === 'development' ? 'itsDev' : 'itsLocal'
      return config
    }
  },
  component: {
    devServer: {
      framework: "create-react-app",
      bundler: "webpack"
    }
  }
});
NOTE : this cypress.config.ts must be inside the cypress directory.

Loopback API Error while creating the profile model

I am trying to create the Profile model in LoopBack and it is showing this error.
Error details:
Unhandled error for request POST /api/Users: Error: Cannot call Profile.create(). The create method has not been setup. The PersistedModel has not been correctly attached to a DataSource!
'use strict';

var loopback = require('loopback');
var boot = require('loopback-boot');

var app = module.exports = loopback();

app.start = function() {
  // start the web server
  return app.listen(function() {
    app.emit('started');
    var baseUrl = app.get('url').replace(/\/$/, '');
    console.log('Web server listening at: %s', baseUrl);
    if (app.get('loopback-component-explorer')) {
      var explorerPath = app.get('loopback-component-explorer').mountPath;
      console.log('Browse your REST API at %s%s', baseUrl, explorerPath);
    }
  });
};

// Bootstrap the application, configure models, datasources and middleware.
// Sub-apps like REST API are mounted via boot scripts.
boot(app, __dirname, function(err) {
  if (err) throw err;
  // start the server if `$ node server.js`
  if (require.main === module)
    app.start();
});

console.log(Object.keys(app.models));

app.models.User.afterRemote('create', (ctx, user, next) => {
  console.log("The new User is ", user);
  app.models.Profile.create({
    first_name: user.username,
    created_at: new Date(),
    userId: user.id
  }, (err, result) => {
    if (!err && result) {
      console.log("Created new profile !", result);
    } else {
      console.log("There is an error ", err);
    }
    next();
  });
});
Profile.json file
{
  "name": "Profile",
  "base": "PersistedModel",
  "idInjection": true,
  "options": {
    "validateUpsert": true
  },
  "properties": {
    "first_name": {
      "type": "string"
    },
    "last_name": {
      "type": "string"
    },
    "birth_date": {
      "type": "date"
    },
    "created_at": {
      "type": "date"
    },
    "age": {
      "type": "number"
    },
    "history": {
      "type": ["object"]
    }
  },
  "validations": [],
  "relations": {},
  "acls": [],
  "methods": {}
}
This is the profile.js file:
'use strict';
module.exports = function(Profile) {
};
Perhaps you still need to bind your model to the data source in the model-config.json file, like this:
datasources.json
"rdb": {
  "host": "localhost",
  "port": 3306,
  "database": "asat",
  "password": "12345",
  "name": "rdb",
  "user": "admin",
  "connector": "mysql"
}
...
model-config.json
"Profile": {
  "dataSource": "rdb",
  "public": true
}
...
The reason can be any of the following:
You forgot to add the model name to the model-config.json file.
Sometimes when the LoopBack server starts, the model does not get attached to the server object properly. If the model is not attached properly, we cannot use it through the app object, and a server restart will solve the issue.
The datasource is not correctly specified in the model-config.json file.

Dojo intern set firefox profile name

Hi, I am trying to set the Firefox profile name in the environment settings of the Intern config file. I have tried
environments: [
  { browserName: 'firefox', firefox_profile: 'default' },
  { firefox_profile: 'default' }
],
and
environments: [
  { browserName: 'firefox', profile: 'default' },
  { profile: 'default' }
],
as well as
capabilities: {
  'selenium-version': '2.42.0',
  firefox_profile: 'default'
},
as mentioned in the Selenium capabilities page.
But Firefox still launches with an anonymous profile.
However, if I use Watir,
def setup
  @browser = Watir::Browser.new :firefox, :profile => 'default'
  goto_ecp_console_manage_page
end
the browser launches the default profile, which is 'kinit-ed' (Kerberos).
As the Selenium capabilities page you mention points out, the value of firefox_profile must be a Base64-encoded profile. Specifically, you ZIP up a Firefox profile directory, Base64 encode it, and use that string as the value of firefox_profile. The firefox-profile npm package can make this process easier. You'll end up with something like:
environments: [
  { browserName: 'firefox', firefox_profile: 'UEsDBBQACAAIACynEk...' },
  ...
],
I would recommend storing the profile string in a separate module since it's going to be around 250kb.
I used @jason0x43's suggestion to rely on the firefox-profile Node.js module, and I created the following Grunt task firefoxProfile4selenium. With a simple configuration set in the Gruntfile.js, the plugin writes a file to disk with the Base64-encoded version of a zipped profile!
Here is the grunt configuration:
firefoxProfile4selenium: {
  options: {
    proxy: {
      host: '...',
      port: ...
    },
    bypass: [ 'localhost', '127.0.0.1', '...' ]
  },
  default: {
    files: [{
      dest: 'firefoxProfile.b64.txt'
    }]
  }
}
Here is the plugin:
/*global require, module*/
var fs = require('fs'),
    FirefoxProfile = require('firefox-profile'),
    taskName = 'firefoxProfile4selenium';

module.exports = function (grunt) {
    'use strict';
    grunt.registerMultiTask(taskName, 'Prepares a Firefox profile for Selenium', function () {
        var done = this.async(),
            firefoxProfile = new FirefoxProfile(),
            options = this.options(),
            host = this.options().proxy.host,
            port = this.options().proxy.port,
            bypass = this.options().bypass,
            dest = this.files[0].dest;

        // Set the configuration type for considering the custom settings
        firefoxProfile.setPreference('network.proxy.type', 2);

        // Set the proxy host
        firefoxProfile.setPreference('network.proxy.ftp', host);
        firefoxProfile.setPreference('network.proxy.http', host);
        firefoxProfile.setPreference('network.proxy.socks', host);
        firefoxProfile.setPreference('network.proxy.ssl', host);

        // Set the proxy port
        firefoxProfile.setPreference('network.proxy.ftp_port', port);
        firefoxProfile.setPreference('network.proxy.http_port', port);
        firefoxProfile.setPreference('network.proxy.socks_port', port);
        firefoxProfile.setPreference('network.proxy.ssl_port', port);

        // Set the list of hosts that should bypass the proxy
        firefoxProfile.setPreference('network.proxy.no_proxies_on', bypass.join(','));

        firefoxProfile.encoded(function (zippedProfile) {
            fs.writeFile(dest, zippedProfile, function (error) {
                done(error); // FYI, done(null) reports a success, otherwise it's a failure
            });
        });
    });
};

How to copy a folder over SSH with Gulp?

I have been experimenting with gulp lately, and have had a lot of success, but now I am stumped.
I have gulp building everything, and I want to upload a folder afterwards. I have created a deploy task for this using gulp-scp2:
gulp.task('deploy', ['clean', 'build'], function() {
    var privateKeyPath = getUserHome() + '/.ssh/id_rsa';

    gulp.src('public/dist')
        .pipe(scp({
            host: 'myhost',
            username: 'user',
            dest: '/home/user/test',
            agent: process.env['SSH_AUTH_SOCK'],
            agentForward: true,
            watch: function(client) {
                client.on('write', function(o) {
                    console.log('write %s', o.destination);
                });
            }
        })).on('error', function(err) {
            console.log(err);
        });
});
Unfortunately, when I do this, I get the following error:
Error: Content should be buffer or file descriptor
How can I copy a folder over SSH using gulp?
I did end up finding a solution by leveraging the node scp2 library:
scpClient = require('scp2');

gulp.task('scp', [], function (cb) {
    scpClient.scp('local_folder', {
        "host": "remote_host",
        "port": "remote_port",
        "username": "username_on_remote",
        "path": "/path/on/remote",
        "agent": process.env["SSH_AUTH_SOCK"],
        "agentForward": true
    }, cb)
});
As in the previous answer, I ended up using the node library directly; this one works the gulp 4+ way.
First install the lib (be sure to install it locally in the project; the global version doesn't work when used in a gulpfile):
npm install scp2
Then in the gulp file:
var scpClient = require('scp2');

function deploySCP2() {
    return scpClient.scp(paths.buildAll, {
        "host": "host",
        "username": "username",
        "password": "password",
        "path": "path"
    }, function (err) { })
}
This will work right away.
