Uploading file error: Request entity too large

I’m using parse-server on my computer (windows10), when I try to upload a zip file (50Mb) via parse-dashboard, It gives me an error:
On dashboard: Request entity too large
In the log: "code":130, "level":"error", "message":"Invalid file upload."

I tried FSAdapter too, but still not working.

You are probably hitting the Express.js body-parser body size limit. Take a look at this thread: "Error: request entity too large" on Stack Overflow.

Thank you davimacedo, but it didn’t work for me.

According to the maxUploadSize documentation, I added maxUploadSize: '200mb' to the configuration options.

But it only works for files smaller than about 80 MB; for anything larger it gives me this error:
XMLHttpRequest failed: “Unable to connect to the parse API”

You were receiving “Request entity too large” and you are now receiving “Unable to connect to the parse API”, right? So it looks that now you have a different problem. Maybe your parse server process is hitting some limit. How are you running parse server?

Yes, that’s right.
I’m using pm2 with the same index.js in “parse-server-example” (with a little changes).

Could it be because of HTTP? Doesn't HTTP have any limit on upload size?

Can you check pm2 logs?

(node:12584) DeprecationWarning: Listening to events on the Db class has been deprecated and will be removed in the next major version.

(Use node --trace-deprecation ... to show where the warning was created)

-me: The number changes every time. (It’s not always 12584)

Oh ok. I had an issue where pm2 would watch the logs folder and server would trigger a restart whenever a file was saved (as saving files would write to logs), so the “unable to connect” error would show

After --trace-deprecation:

at …\node_modules\parse-server\lib\Adapters\Storage\Mongo\MongoStorageAdapter.js:182:16
at processTicksAndRejections (internal/process/task_queues.js:95:5)

<--- Last few GCs --->

[124:03D49218]    80198 ms: Mark-sweep 34.8 (41.6) -> 34.7 (41.6) MB, 2.2 / 0.0 ms  (+ 0.1 ms in 2 steps since start of marking, biggest step 0.0 ms, walltime since start of marking 34 ms) (average mu = 0.995, current mu = 0.970) finalize incremental mark[124:03D49218]    80387 ms: Mark-sweep 152.1 (158.9) -> 152.0 (159.2) MB, 2.7 / 0.0 ms  (+ 2.5 ms in 2 steps since start of marking, biggest step 2.5 ms, walltime since start of marking 183 ms) (average mu = 0.992, current mu = 0.972) allocation failure I

<--- JS stacktrace --->

FATAL ERROR: NewArray Allocation failed - process out of memory

I wonder why it goes into "Adapters\Storage\Mongo\MongoStorageAdapter.js"!? I set filesAdapter = fsAdapter, so it is supposed to store the files in local storage, not in MongoDB! (and it does actually store them in parse/files)

@dblythy @davimacedo @Amir Going through this exact issue,
Uploading larger files failing in dashboard, s3 enabled, nginx reverse proxy enabled, max_upload_size 500M, nginx client_max_body_size 500M
ParseError: 100 XMLHttpRequest failed: Unable to connect to the Parse API
and a timeout error shows in the browser's developer tools; note that uploading smaller files works fine.

// Load environment variables (SPACES_*, APPID, MASTERKEY, ...) from .env.
require('dotenv').config();

// Core dependencies.
const express = require('express');
const ParseServer = require('parse-server').ParseServer;
const ParseDashboard = require('parse-dashboard');

const app = express();

// NOTE: TLS is expected to be terminated by the nginx reverse proxy in
// front of this process, so no https server is created here.

// File storage: DigitalOcean Spaces via the S3-compatible files adapter.
const S3Adapter = require("@parse/s3-files-adapter");
const AWS = require("aws-sdk");

// Spaces exposes an S3-compatible API, so we point the AWS SDK at the
// Spaces endpoint instead of an AWS region endpoint.
const spacesEndpoint = new AWS.Endpoint(process.env.SPACES_ENDPOINT);

const s3Options = {
  bucket: process.env.SPACES_BUCKET_NAME,
  baseUrl: process.env.SPACES_BASE_URL,
  region: process.env.SPACES_REGION,
  // Serve files directly from Spaces/CDN instead of streaming every
  // download through parse-server.
  directAccess: true,
  globalCacheControl: "public, max-age=31536000", // cache for 1 year
  s3overrides: {
    accessKeyId: process.env.SPACES_ACCESS_KEY,
    secretAccessKey: process.env.SPACES_SECRET_KEY,
    endpoint: spacesEndpoint
  }
};

const s3Adapter = new S3Adapter(s3Options);

// Parse Server configuration.
// NOTE(review): maxUploadSize must not exceed nginx's client_max_body_size.
// Dashboard uploads are base64-encoded and buffered in memory, so very
// large files can OOM the node process — for big files prefer uploading
// directly to S3/Spaces instead of through the REST API.
var api = new ParseServer({
    appName: 'Medo',
    maxUploadSize: "200mb",
    // SECURITY: database credentials should not be hard-coded in source.
    // Prefer the DATABASE_URI env var; the original value is kept only as
    // a backward-compatible fallback and should be rotated.
    databaseURI: process.env.DATABASE_URI || 'mongodb://adminmed:[email protected]:27017/meddb',
    cloud: __dirname + '/cloud/main.js',
    appId: process.env.APPID,
    masterKey: process.env.MASTERKEY,
    // Must match the Express mount point (app.use('/parse-med', api)) and
    // the path nginx proxies to, or clients will fail to connect.
    serverURL: 'http://localhost:1337/parse-med/',
    filesAdapter: s3Adapter
});

// Mount the Parse API under /parse-med (must agree with serverURL above)
// and start the plain-HTTP listener; TLS is handled by the reverse proxy.
app.use('/parse-med', api);

const port = 1337;

app.listen(port, () => {
  console.log(`parse-server running on port ${port}`);
});

nginx.conf


user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
        worker_connections 768;
        # multi_accept on;
}

http {

        ##
        # Basic Settings
        ##

        sendfile on;
        tcp_nopush on;
        tcp_nodelay on;
        keepalive_timeout 650;
        types_hash_max_size 2048;

        # Large-upload settings: client_max_body_size must be at least as
        # large as parse-server's maxUploadSize, and the generous timeouts
        # keep slow, multi-hundred-MB uploads from being cut off mid-transfer.
        client_max_body_size 500M;
        proxy_connect_timeout 600;
        proxy_send_timeout 600;
        proxy_read_timeout 600;
        send_timeout 600;
        client_body_buffer_size 5M;

        include /etc/nginx/mime.types;
        default_type application/octet-stream;

        # NOTE(review): TLSv1 and TLSv1.1 are deprecated (RFC 8996);
        # consider restricting this to "TLSv1.2 TLSv1.3".
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
        ssl_prefer_server_ciphers on;

        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log;

        gzip on;

        include /etc/nginx/conf.d/*.conf;
        include /etc/nginx/sites-enabled/*;

        # FIX: server{} blocks are only valid inside the http{} context (or
        # in a file pulled in by the sites-enabled include above). They
        # previously appeared after http{}'s closing brace, which makes
        # nginx fail to start with "server directive is not allowed here".

        # HTTP -> HTTPS redirect.
        server {
            listen 80;
            listen [::]:80 default_server ipv6only=on;
            server_name example.com www.example.com;
            return 301 https://$host$request_uri;
        }

        # HTTPS - proxy requests through to Parse Server.
        server {
            listen 443 ssl http2;
            listen [::]:443 ssl http2;
            ssl_certificate         /etc/ssl/cert.pem;
            ssl_certificate_key     /etc/ssl/key.pem;
            ssl_client_certificate /etc/ssl/cloudflare.crt;
            ssl_verify_client on;
            server_name example.com www.example.com;
            root /usr/share/nginx/html;
            index index.html index.htm index.nginx-debian.html;

            # FIX: this path must match the Express mount point in index.js
            # (app.use('/parse-med', api)). It previously proxied
            # /parse-example/, so every API request through nginx 404'd and
            # clients reported "Unable to connect to the Parse API".
            location /parse-med/ {
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_set_header X-NginX-Proxy true;
                proxy_pass http://localhost:1337/parse-med/;
                proxy_ssl_session_reuse off;
                proxy_set_header Host $http_host;
                proxy_redirect off;
            }

            # Parse Dashboard instance on port 4040.
            location /dashboard/ {
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_set_header X-NginX-Proxy true;
                proxy_pass http://localhost:4040/dashboard/;
                proxy_ssl_session_reuse off;
                proxy_set_header Host $http_host;
                proxy_redirect off;
            }
        }
}