Zlib#

Stability: 2 - Stable

Source Code: lib/zlib.js

The node:zlib module provides compression functionality implemented using Gzip, Deflate/Inflate, Brotli, and Zstd.

To access it:

import zlib from 'node:zlib';
// Or, with CommonJS:
const zlib = require('node:zlib');

Compression and decompression are built around the Node.js Streams API.

Compressing or decompressing a stream (such as a file) can be accomplished by piping the source stream through a zlib Transform stream into a destination stream:

import {
  createReadStream,
  createWriteStream,
} from 'node:fs';
import process from 'node:process';
import { createGzip } from 'node:zlib';
import { pipeline } from 'node:stream';

const gzip = createGzip();
const source = createReadStream('input.txt');
const destination = createWriteStream('input.txt.gz');

pipeline(source, gzip, destination, (err) => {
  if (err) {
    console.error('An error occurred:', err);
    process.exitCode = 1;
  }
});

Or, using the promise pipeline API:

import {
  createReadStream,
  createWriteStream,
} from 'node:fs';
import { createGzip } from 'node:zlib';
import { pipeline } from 'node:stream/promises';

async function do_gzip(input, output) {
  const gzip = createGzip();
  const source = createReadStream(input);
  const destination = createWriteStream(output);
  await pipeline(source, gzip, destination);
}

await do_gzip('input.txt', 'input.txt.gz');

It is also possible to compress or decompress data in a single step:

import process from 'node:process';
import { Buffer } from 'node:buffer';
import { deflate, unzip } from 'node:zlib';

const input = '.................................';
deflate(input, (err, buffer) => {
  if (err) {
    console.error('An error occurred:', err);
    process.exitCode = 1;
  }
  console.log(buffer.toString('base64'));
});

const buffer = Buffer.from('eJzT0yMAAGTvBe8=', 'base64');
unzip(buffer, (err, buffer) => {
  if (err) {
    console.error('An error occurred:', err);
    process.exitCode = 1;
  }
  console.log(buffer.toString());
});

// Or, Promisified
import { promisify } from 'node:util';
const do_unzip = promisify(unzip);

const unzippedBuffer = await do_unzip(buffer);
console.log(unzippedBuffer.toString());

Threadpool usage and performance considerations#

All zlib APIs, except those that are explicitly synchronous, use the Node.js internal threadpool. This can lead to surprising effects and performance limitations in some applications.

Creating and using a large number of zlib objects simultaneously can cause significant memory fragmentation.

import zlib from 'node:zlib';
import { Buffer } from 'node:buffer';

const payload = Buffer.from('This is some data');

// WARNING: DO NOT DO THIS!
for (let i = 0; i < 30000; ++i) {
  zlib.deflate(payload, (err, buffer) => {});
}

In the preceding example, 30,000 deflate instances are created concurrently. Because of how some operating systems handle memory allocation and deallocation, this may lead to significant memory fragmentation.

It is strongly recommended that the results of compression operations be cached to avoid duplication of effort.
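
For example, one possible caching approach is sketched below; the cache Map and the compressCached() helper are illustrative and not part of the node:zlib API:

import zlib from 'node:zlib';
import { promisify } from 'node:util';

const gzip = promisify(zlib.gzip);

// Hypothetical in-memory cache keyed by resource name. A real application
// might bound its size, add invalidation, or store entries on disk.
const cache = new Map();

async function compressCached(key, buffer) {
  if (cache.has(key)) return cache.get(key);
  const compressed = await gzip(buffer);
  cache.set(key, compressed);
  return compressed;
}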

Compressing HTTP requests and responses#

The node:zlib module can be used to implement support for the gzip, deflate, br, and zstd content-encoding mechanisms defined by HTTP.

The HTTP Accept-Encoding header is used within an HTTP request to identify the compression encodings accepted by the client. The Content-Encoding header is used to identify the compression encodings actually applied to a message.

The examples given below are drastically simplified to show the basic concept. Using zlib encoding can be expensive, and the results ought to be cached. See Memory usage tuning for more information on the speed/memory/compression tradeoffs involved in zlib usage.

// Client request example
import fs from 'node:fs';
import zlib from 'node:zlib';
import http from 'node:http';
import process from 'node:process';
import { pipeline } from 'node:stream';

const request = http.get({
  host: 'example.com',
  path: '/',
  port: 80,
  headers: { 'Accept-Encoding': 'br,gzip,deflate,zstd' },
});
request.on('response', (response) => {
  const output = fs.createWriteStream('example.com_index.html');
  const onError = (err) => {
    if (err) {
      console.error('An error occurred:', err);
      process.exitCode = 1;
    }
  };

  switch (response.headers['content-encoding']) {
    case 'br':
      pipeline(response, zlib.createBrotliDecompress(), output, onError);
      break;
    // Or, just use zlib.createUnzip() to handle both of the following cases:
    case 'gzip':
      pipeline(response, zlib.createGunzip(), output, onError);
      break;
    case 'deflate':
      pipeline(response, zlib.createInflate(), output, onError);
      break;
    case 'zstd':
      pipeline(response, zlib.createZstdDecompress(), output, onError);
      break;
    default:
      pipeline(response, output, onError);
      break;
  }
});

// server example
// Running a gzip operation on every request is quite expensive.
// It would be much more efficient to cache the compressed buffer.
import zlib from 'node:zlib';
import http from 'node:http';
import fs from 'node:fs';
import { pipeline } from 'node:stream';

http.createServer((request, response) => {
  const raw = fs.createReadStream('index.html');
  // Store both a compressed and an uncompressed version of the resource.
  response.setHeader('Vary', 'Accept-Encoding');
  const acceptEncoding = request.headers['accept-encoding'] || '';

  const onError = (err) => {
    if (err) {
      // If an error occurs, there's not much we can do because
      // the server has already sent the 200 response code and
      // some amount of data has already been sent to the client.
      // The best we can do is terminate the response immediately
      // and log the error.
      response.end();
      console.error('An error occurred:', err);
    }
  };

  // Note: This is not a conformant accept-encoding parser.
  // See https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.3
  if (/\bdeflate\b/.test(acceptEncoding)) {
    response.writeHead(200, { 'Content-Encoding': 'deflate' });
    pipeline(raw, zlib.createDeflate(), response, onError);
  } else if (/\bgzip\b/.test(acceptEncoding)) {
    response.writeHead(200, { 'Content-Encoding': 'gzip' });
    pipeline(raw, zlib.createGzip(), response, onError);
  } else if (/\bbr\b/.test(acceptEncoding)) {
    response.writeHead(200, { 'Content-Encoding': 'br' });
    pipeline(raw, zlib.createBrotliCompress(), response, onError);
  } else if (/\bzstd\b/.test(acceptEncoding)) {
    response.writeHead(200, { 'Content-Encoding': 'zstd' });
    pipeline(raw, zlib.createZstdCompress(), response, onError);
  } else {
    response.writeHead(200, {});
    pipeline(raw, response, onError);
  }
}).listen(1337);

By default, the zlib methods will throw an error when decompressing truncated data. However, if it is known that the data is incomplete, or the desire is to inspect only the beginning of a compressed file, it is possible to suppress the default error handling by changing the flushing method that is used to decompress the last chunk of input data:

// This is a truncated version of the buffer from the above examples
const buffer = Buffer.from('eJzT0yMA', 'base64');

zlib.unzip(
  buffer,
  // For Brotli, the equivalent is zlib.constants.BROTLI_OPERATION_FLUSH.
  // For Zstd, the equivalent is zlib.constants.ZSTD_e_flush.
  { finishFlush: zlib.constants.Z_SYNC_FLUSH },
  (err, buffer) => {
    if (err) {
      console.error('An error occurred:', err);
      process.exitCode = 1;
    }
    console.log(buffer.toString());
  });

This will not change the behavior in other error-throwing situations, e.g. when the input data has an invalid format. Using this method, it will not be possible to determine whether the input ended prematurely or lacks the integrity checks, making it necessary to manually check that the decompressed result is valid.

Memory usage tuning#

For zlib-based streams#

From zlib/zconf.h, modified for Node.js usage:

The memory requirements for deflate are (in bytes):

(1 << (windowBits + 2)) + (1 << (memLevel + 9))

That is: 128K for windowBits = 15 + 128K for memLevel = 8 (default values) plus a few kilobytes for small objects.

For example, to reduce the default memory requirements from 256K to 128K, the options should be set to:

const options = { windowBits: 14, memLevel: 7 };

This will, however, generally degrade compression.

The memory requirements for inflate are (in bytes) 1 << windowBits. That is, 32K for windowBits = 15 (default value) plus a few kilobytes for small objects.

This is in addition to a single internal output slab buffer of size chunkSize, which defaults to 16K.

The speed of zlib compression is affected most dramatically by the level setting. A higher level will result in better compression, but will take longer to complete. A lower level will result in less compression, but will be much faster.

In general, greater memory usage options will mean that Node.js has to make fewer calls to zlib because it will be able to process more data on each write operation. So, this is another factor that affects the speed, at the cost of memory usage.
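
As a sketch, these options can be passed when constructing a zlib-based stream; the specific values below are illustrative only:

import zlib from 'node:zlib';

const deflate = zlib.createDeflate({
  // Roughly halves the deflate state memory relative to the defaults
  // (see the formula above), at some cost in compression ratio.
  windowBits: 14,
  memLevel: 7,
  level: zlib.constants.Z_BEST_SPEED, // favour speed over ratio
  chunkSize: 16 * 1024,               // size of the internal output buffer
});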

For Brotli-based streams#

There are equivalents to the zlib options for Brotli-based streams, although these options have different ranges than the zlib ones:

  • zlib's level option matches Brotli's BROTLI_PARAM_QUALITY option.
  • zlib's windowBits option matches Brotli's BROTLI_PARAM_LGWIN option.

See below for more details on Brotli-specific options.

For Zstd-based streams#

Stability: 1 - Experimental

There are equivalents to the zlib options for Zstd-based streams, although these options have different ranges than the zlib ones:

  • zlib's level option matches Zstd's ZSTD_c_compressionLevel option.
  • zlib's windowBits option matches Zstd's ZSTD_c_windowLog option.

See below for more details on Zstd-specific options.

Flushing#

Calling .flush() on a compression stream will make zlib return as much output as currently possible. This may come at the cost of degraded compression quality, but can be useful when data needs to be available as soon as possible.

In the following example, flush() is used to write a compressed partial HTTP response to the client:

import zlib from 'node:zlib';
import http from 'node:http';
import { pipeline } from 'node:stream';

http.createServer((request, response) => {
  // For the sake of simplicity, the Accept-Encoding checks are omitted.
  response.writeHead(200, { 'content-encoding': 'gzip' });
  const output = zlib.createGzip();
  let i;

  pipeline(output, response, (err) => {
    if (err) {
      // If an error occurs, there's not much we can do because
      // the server has already sent the 200 response code and
      // some amount of data has already been sent to the client.
      // The best we can do is terminate the response immediately
      // and log the error.
      clearInterval(i);
      response.end();
      console.error('An error occurred:', err);
    }
  });

  i = setInterval(() => {
    output.write(`The current time is ${Date()}\n`, () => {
      // The data has been passed to zlib, but the compression algorithm may
      // have decided to buffer the data for more efficient compression.
      // Calling .flush() will make the data available as soon as the client
      // is ready to receive it.
      output.flush();
    });
  }, 1000);
}).listen(1337);

Constants#

Added in: v0.5.8

zlib constants#

All of the constants defined in zlib.h are also defined on require('node:zlib').constants. In the normal course of operations, it will not be necessary to use these constants. They are documented so that their presence is not surprising. This section is taken almost directly from the zlib documentation.

Previously, the constants were available directly from require('node:zlib'), for instance zlib.Z_NO_FLUSH. Accessing the constants directly from the module is currently still possible but is deprecated.

Allowed flush values.

  • zlib.constants.Z_NO_FLUSH
  • zlib.constants.Z_PARTIAL_FLUSH
  • zlib.constants.Z_SYNC_FLUSH
  • zlib.constants.Z_FULL_FLUSH
  • zlib.constants.Z_FINISH
  • zlib.constants.Z_BLOCK

Return codes for the compression/decompression functions. Negative values are errors, positive values are used for special but normal events.

  • zlib.constants.Z_OK
  • zlib.constants.Z_STREAM_END
  • zlib.constants.Z_NEED_DICT
  • zlib.constants.Z_ERRNO
  • zlib.constants.Z_STREAM_ERROR
  • zlib.constants.Z_DATA_ERROR
  • zlib.constants.Z_MEM_ERROR
  • zlib.constants.Z_BUF_ERROR
  • zlib.constants.Z_VERSION_ERROR

Compression levels.

  • zlib.constants.Z_NO_COMPRESSION
  • zlib.constants.Z_BEST_SPEED
  • zlib.constants.Z_BEST_COMPRESSION
  • zlib.constants.Z_DEFAULT_COMPRESSION

Compression strategy.

  • zlib.constants.Z_FILTERED
  • zlib.constants.Z_HUFFMAN_ONLY
  • zlib.constants.Z_RLE
  • zlib.constants.Z_FIXED
  • zlib.constants.Z_DEFAULT_STRATEGY
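
As a minimal sketch, the level and strategy constants above can be passed as options to the zlib classes and convenience methods:

import zlib from 'node:zlib';
import { Buffer } from 'node:buffer';

zlib.deflate(
  Buffer.from('aaaaaaaaaabbbbbbbbbb'),
  {
    level: zlib.constants.Z_BEST_COMPRESSION,
    strategy: zlib.constants.Z_RLE, // limit matching to run-length encoding
  },
  (err, compressed) => {
    if (err) throw err;
    console.log(compressed.length);
  });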

Brotli constants#

Added in: v11.7.0, v10.16.0

There are several options and other constants available for Brotli-based streams:

Flush operations#

The following values are valid flush operations for Brotli-based streams:

  • zlib.constants.BROTLI_OPERATION_PROCESS (default for all operations)
  • zlib.constants.BROTLI_OPERATION_FLUSH (default when calling .flush())
  • zlib.constants.BROTLI_OPERATION_FINISH (default for the last chunk)
  • zlib.constants.BROTLI_OPERATION_EMIT_METADATA
    • This particular operation may be hard to use in a Node.js context, as the streaming layer makes it hard to know which data will end up in this frame. Also, there is currently no way to consume this data through the Node.js API.

Compressor options#

There are several options that can be set on Brotli encoders, affecting compression efficiency and speed. Both the keys and the values can be accessed as properties of the zlib.constants object.

The most important options are:

  • BROTLI_PARAM_MODE
    • BROTLI_MODE_GENERIC (default)
    • BROTLI_MODE_TEXT, adjusted for UTF-8 text
    • BROTLI_MODE_FONT, adjusted for WOFF 2.0 fonts
  • BROTLI_PARAM_QUALITY
    • Ranges from BROTLI_MIN_QUALITY to BROTLI_MAX_QUALITY, with a default of BROTLI_DEFAULT_QUALITY.
  • BROTLI_PARAM_SIZE_HINT
    • Integer value representing the expected input size; defaults to 0 for an unknown input size.

The following flags can be set for advanced control over the compression algorithm and memory usage tuning; a combined sketch follows the list:

  • BROTLI_PARAM_LGWIN
    • Ranges from BROTLI_MIN_WINDOW_BITS to BROTLI_MAX_WINDOW_BITS, with a default of BROTLI_DEFAULT_WINDOW, or up to BROTLI_LARGE_MAX_WINDOW_BITS if the BROTLI_PARAM_LARGE_WINDOW flag is set.
  • BROTLI_PARAM_LGBLOCK
    • Ranges from BROTLI_MIN_INPUT_BLOCK_BITS to BROTLI_MAX_INPUT_BLOCK_BITS.
  • BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING
    • Boolean flag that decreases compression ratio in favour of decompression speed.
  • BROTLI_PARAM_LARGE_WINDOW
    • Boolean flag enabling “Large Window Brotli” mode (not compatible with the Brotli format as standardized in RFC 7932).
  • BROTLI_PARAM_NPOSTFIX
    • Ranges from 0 to BROTLI_MAX_NPOSTFIX.
  • BROTLI_PARAM_NDIRECT
    • Ranges from 0 to 15 << NPOSTFIX in steps of 1 << NPOSTFIX.
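
A sketch combining several of the parameters above; the chosen values are illustrative only:

import zlib from 'node:zlib';

const compressor = zlib.createBrotliCompress({
  params: {
    [zlib.constants.BROTLI_PARAM_MODE]: zlib.constants.BROTLI_MODE_TEXT,
    [zlib.constants.BROTLI_PARAM_QUALITY]: 5,
    [zlib.constants.BROTLI_PARAM_LGWIN]: 22,   // 2 ** 22 byte window
    [zlib.constants.BROTLI_PARAM_LGBLOCK]: 18, // 2 ** 18 byte input blocks
  },
});
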
Decompressor options#

These advanced options are available for controlling decompression:

  • BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION
    • Boolean flag that affects internal memory allocation patterns.
  • BROTLI_DECODER_PARAM_LARGE_WINDOW
    • Boolean flag enabling “Large Window Brotli” mode (not compatible with the Brotli format as standardized in RFC 7932).

Zstd constants#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

There are several options and other constants available for Zstd-based streams:

Flush operations#

The following values are valid flush operations for Zstd-based streams:

  • zlib.constants.ZSTD_e_continue (default for all operations)
  • zlib.constants.ZSTD_e_flush (default when calling .flush())
  • zlib.constants.ZSTD_e_end (default for the last chunk)
Compressor options#

There are several options that can be set on Zstd encoders, affecting compression efficiency and speed. Both the keys and the values can be accessed as properties of the zlib.constants object.

The most important options are:

  • ZSTD_c_compressionLevel
    • Set compression parameters according to a pre-defined cLevel table. The default level is ZSTD_CLEVEL_DEFAULT==3.
  • ZSTD_c_strategy
    • Select the compression strategy.
    • Possible values are listed in the strategy options section below.
Strategy options#

The following constants can be used as values for the ZSTD_c_strategy parameter:

  • zlib.constants.ZSTD_fast
  • zlib.constants.ZSTD_dfast
  • zlib.constants.ZSTD_greedy
  • zlib.constants.ZSTD_lazy
  • zlib.constants.ZSTD_lazy2
  • zlib.constants.ZSTD_btlazy2
  • zlib.constants.ZSTD_btopt
  • zlib.constants.ZSTD_btultra
  • zlib.constants.ZSTD_btultra2

Example:

const stream = zlib.createZstdCompress({
  params: {
    [zlib.constants.ZSTD_c_strategy]: zlib.constants.ZSTD_btultra,
  },
});

Pledged Source Size#

It's possible to specify the expected total size of the uncompressed input via opts.pledgedSrcSize. If the size doesn't match at the end of the input, compression will fail with the code ZSTD_error_srcSize_wrong.
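
A minimal sketch, assuming the whole input is available as a single Buffer; the data shown is illustrative:

import zlib from 'node:zlib';
import { Buffer } from 'node:buffer';

const data = Buffer.from('some data that will be compressed');

// Declare the total uncompressed size up front. Writing a different
// number of bytes causes compression to fail with ZSTD_error_srcSize_wrong.
const compressor = zlib.createZstdCompress({ pledgedSrcSize: data.byteLength });
compressor.end(data);
compressor.resume(); // the compressed output is discarded in this sketch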

Decompressor options#

These advanced options are available for controlling decompression:

  • ZSTD_d_windowLogMax
    • Select a size limit (as a power of 2) beyond which the streaming API will refuse to allocate a memory buffer, in order to protect the host from unreasonable memory requirements; see the sketch below.
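
A sketch, assuming the decompressor accepts a params object keyed by zlib.constants in the same way the compressor does; the limit shown is illustrative:

import zlib from 'node:zlib';

const decompressor = zlib.createZstdDecompress({
  params: {
    // Refuse frames that would require a window larger than 2 ** 27 bytes.
    [zlib.constants.ZSTD_d_windowLogMax]: 27,
  },
});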

Class: Options#

History
  • v14.5.0, v12.19.0: The maxOutputLength option is supported now.
  • v9.4.0: The dictionary option can be an ArrayBuffer.
  • v8.0.0: The dictionary option can be an Uint8Array now.
  • v5.11.0: The finishFlush option is supported now.
  • v0.11.1: Added in: v0.11.1

Each zlib-based class takes an options object. No options are required.

Some options are only relevant when compressing and are ignored by the decompression classes.

See the deflateInit2 and inflateInit2 documentation for more information.
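
As a sketch, a typical options object for a zlib-based stream might look like this; every field shown is optional and the values are illustrative:

import zlib from 'node:zlib';

const gzip = zlib.createGzip({
  level: 6,              // compression level, 0-9
  memLevel: 8,           // memory used for the internal compression state
  windowBits: 15,        // size of the history window
  chunkSize: 16 * 1024,  // size of the internal output buffer
  finishFlush: zlib.constants.Z_FINISH, // flush value used for the last chunk
});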

Class: BrotliOptions#

History
  • v14.5.0, v12.19.0: The maxOutputLength option is supported now.
  • v11.7.0: Added in: v11.7.0

Each Brotli-based class takes an options object. All options are optional.

For example:

const stream = zlib.createBrotliCompress({
  chunkSize: 32 * 1024,
  params: {
    [zlib.constants.BROTLI_PARAM_MODE]: zlib.constants.BROTLI_MODE_TEXT,
    [zlib.constants.BROTLI_PARAM_QUALITY]: 4,
    [zlib.constants.BROTLI_PARAM_SIZE_HINT]: fs.statSync(inputFile).size,
  },
});

Class: zlib.BrotliCompress#

Added in: v11.7.0, v10.16.0

Compress data using the Brotli algorithm.

Class: zlib.BrotliDecompress#

Added in: v11.7.0, v10.16.0

Decompress data using the Brotli algorithm.

Class: zlib.Deflate#

Added in: v0.5.8

Compress data using deflate.

Class: zlib.DeflateRaw#

Added in: v0.5.8

Compress data using deflate, and do not append a zlib header.

Class: zlib.Gunzip#

History
  • v6.0.0: Trailing garbage at the end of the input stream will now result in an 'error' event.
  • v5.9.0: Multiple concatenated gzip file members are supported now.
  • v5.0.0: A truncated input stream will now result in an 'error' event.
  • v0.5.8: Added in: v0.5.8

Decompress a gzip stream.

Class: zlib.Gzip#

Added in: v0.5.8

Compress data using gzip.

Class: zlib.Inflate#

History
  • v5.0.0: A truncated input stream will now result in an 'error' event.
  • v0.5.8: Added in: v0.5.8

Decompress a deflate stream.

Class: zlib.InflateRaw#

History
  • v6.8.0: Custom dictionaries are now supported by InflateRaw.
  • v5.0.0: A truncated input stream will now result in an 'error' event.
  • v0.5.8: Added in: v0.5.8

Decompress a raw deflate stream.

Class: zlib.Unzip#

Added in: v0.5.8

Decompress either a Gzip- or Deflate-compressed stream by auto-detecting the header.

Class: zlib.ZlibBase#

History
  • v11.7.0, v10.16.0: This class was renamed from Zlib to ZlibBase.
  • v0.5.8: Added in: v0.5.8

Not exported by the node:zlib module. It is documented here because it is the base class of the compressor/decompressor classes.

This class inherits from stream.Transform, allowing node:zlib objects to be used in pipes and similar stream operations.

zlib.bytesWritten#

Added in: v10.0.0

The zlib.bytesWritten property specifies the number of bytes written to the engine, before the bytes are processed (compressed or decompressed, as appropriate for the derived class).
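
A minimal sketch:

import zlib from 'node:zlib';
import { Buffer } from 'node:buffer';

const gzip = zlib.createGzip();
gzip.on('finish', () => {
  // Number of uncompressed bytes that were handed to the engine.
  console.log(`Bytes written to the engine: ${gzip.bytesWritten}`);
});
gzip.resume(); // discard the compressed output in this sketch
gzip.end(Buffer.from('some data'));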

zlib.close([callback])#

Added in: v0.9.4

Close the underlying handle.

zlib.flush([kind, ]callback)#

Added in: v0.5.8
  • kind Default: zlib.constants.Z_FULL_FLUSH for zlib-based streams, zlib.constants.BROTLI_OPERATION_FLUSH for Brotli-based streams.
  • callback <Function>

Flush pending data. Don't call this frivolously; premature flushes negatively impact the effectiveness of the compression algorithm.

Calling this only flushes data from the internal zlib state, and does not perform flushing of any kind on the streams level. Rather, it behaves like a normal call to .write(), i.e. it will be queued up behind other pending writes and will only produce output when data is being read from the stream.

zlib.params(level, strategy, callback)#

Added in: v0.11.4

This function is only available for zlib-based streams, i.e. not Brotli.

Dynamically update the compression level and compression strategy. Only applicable to the deflate algorithm.
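
A sketch of adjusting the parameters of an active deflate stream; in a real application the output would be piped somewhere rather than discarded:

import zlib from 'node:zlib';

const deflate = zlib.createDeflate({ level: zlib.constants.Z_BEST_COMPRESSION });
deflate.resume(); // discard the compressed output in this sketch

deflate.write('first part, compressed at the highest level');
deflate.params(
  zlib.constants.Z_BEST_SPEED,
  zlib.constants.Z_DEFAULT_STRATEGY,
  (err) => {
    if (err) throw err;
    deflate.end('second part, compressed at the fastest level');
  });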

zlib.reset()#

Added in: v0.7.0

Reset the compressor/decompressor to factory defaults. Only applicable to the inflate and deflate algorithms.

Class: ZstdOptions#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Each Zstd-based class takes an options object. All options are optional.

For example:

const stream = zlib.createZstdCompress({
  chunkSize: 32 * 1024,
  params: {
    [zlib.constants.ZSTD_c_compressionLevel]: 10,
    [zlib.constants.ZSTD_c_checksumFlag]: 1,
  },
});

Class: zlib.ZstdCompress#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Compress data using the Zstd algorithm.

Class: zlib.ZstdDecompress#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Decompress data using the Zstd algorithm.

zlib.constants#

Added in: v7.0.0

Provides an object enumerating Zlib-related constants.

zlib.crc32(data[, value])#

Added in: v22.2.0, v20.15.0
  • data <string> | <Buffer> | <TypedArray> | <DataView> When data is a string, it will be encoded as UTF-8 before being used for computation.
  • value <integer> An optional starting value. It must be a 32-bit unsigned integer. Default: 0
  • Returns: <integer> A 32-bit unsigned integer containing the checksum.

Computes a 32-bit Cyclic Redundancy Check checksum of data. If value is specified, it is used as the starting value of the checksum; otherwise, 0 is used as the starting value.

The CRC algorithm is designed to compute checksums and to detect errors in data transmission. It's not suitable for cryptographic authentication.

To be consistent with other APIs, if the data is a string, it will be encoded with UTF-8 before being used for computation. If users only use Node.js to compute and match the checksums, this works well with other APIs that use the UTF-8 encoding by default.

Some third-party JavaScript libraries compute the checksum on a string based on str.charCodeAt() so that it can be run in browsers. If users want to match the checksum computed with this kind of library in the browser, it's better to use the same library in Node.js if it also runs in Node.js. If users have to use zlib.crc32() to match the checksum produced by such a third-party library:

  1. If the library accepts Uint8Array as input, use TextEncoder in the browser to encode the string into a Uint8Array with UTF-8 encoding, and compute the checksum based on the UTF-8 encoded string in the browser.
  2. If the library only takes a string and computes the checksum based on str.charCodeAt(), on the Node.js side, convert the string into a buffer using Buffer.from(str, 'utf16le').
import zlib from 'node:zlib';
import { Buffer } from 'node:buffer';

let crc = zlib.crc32('hello'); // 907060870
crc = zlib.crc32('world', crc); // 4192936109

crc = zlib.crc32(Buffer.from('hello', 'utf16le')); // 1427272415
crc = zlib.crc32(Buffer.from('world', 'utf16le'), crc); // 4150509955

zlib.createBrotliCompress([options])#

Added in: v11.7.0, v10.16.0

Creates and returns a new BrotliCompress object.

zlib.createBrotliDecompress([options])#

Added in: v11.7.0, v10.16.0

Creates and returns a new BrotliDecompress object.

zlib.createDeflate([options])#

Added in: v0.5.8

Creates and returns a new Deflate object.

zlib.createDeflateRaw([options])#

Added in: v0.5.8

Creates and returns a new DeflateRaw object.

An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when windowBits is set to 8 for raw deflate streams. zlib would automatically set windowBits to 9 if it was initially set to 8. Newer versions of zlib will throw an exception, so Node.js restored the original behavior of upgrading a value of 8 to 9, since passing windowBits = 9 to zlib actually results in a compressed stream that effectively uses an 8-bit window only.

zlib.createGunzip([options])#

Added in: v0.5.8

Creates and returns a new Gunzip object.

zlib.createGzip([options])#

Added in: v0.5.8

Creates and returns a new Gzip object. See example.

zlib.createInflate([options])#

Added in: v0.5.8

Creates and returns a new Inflate object.

zlib.createInflateRaw([options])#

Added in: v0.5.8

Creates and returns a new InflateRaw object.

zlib.createUnzip([options])#

Added in: v0.5.8

Creates and returns a new Unzip object.

zlib.createZstdCompress([options])#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Creates and returns a new ZstdCompress object.

zlib.createZstdDecompress([options])#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Creates and returns a new ZstdDecompress object.

Convenience methods#

All of these take a <Buffer>, <TypedArray>, <DataView>, <ArrayBuffer>, or string as the first argument, an optional second argument to supply options to the zlib classes and will call the supplied callback with callback(error, result).

Every method has a *Sync counterpart, which accepts the same arguments, but without a callback.
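
For example, a synchronous round trip using the *Sync variants:

import zlib from 'node:zlib';
import { Buffer } from 'node:buffer';
import assert from 'node:assert';

const input = Buffer.from('Hello, zlib!');
const compressed = zlib.gzipSync(input);
const roundTripped = zlib.gunzipSync(compressed);
assert.strictEqual(roundTripped.toString(), 'Hello, zlib!');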

zlib.brotliCompress(buffer[, options], callback)#

Added in: v11.7.0, v10.16.0

zlib.brotliCompressSync(buffer[, options])#

Added in: v11.7.0, v10.16.0

Compress a chunk of data with BrotliCompress.

zlib.brotliDecompress(buffer[, options], callback)#

Added in: v11.7.0, v10.16.0

zlib.brotliDecompressSync(buffer[, options])#

Added in: v11.7.0, v10.16.0

Decompress a chunk of data with BrotliDecompress.

zlib.deflate(buffer[, options], callback)#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.deflateSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Compress a chunk of data with Deflate.

zlib.deflateRaw(buffer[, options], callback)#

History
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.deflateRawSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Compress a chunk of data with DeflateRaw.

zlib.gunzip(buffer[, options], callback)#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.gunzipSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Decompress a chunk of data with Gunzip.

zlib.gzip(buffer[, options], callback)#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.gzipSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Compress a chunk of data with Gzip.

zlib.inflate(buffer[, options], callback)#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.inflateSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Decompress a chunk of data with Inflate.

zlib.inflateRaw(buffer[, options], callback)#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.inflateRawSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Decompress a chunk of data with InflateRaw.

zlib.unzip(buffer[, options], callback)#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.6.0: Added in: v0.6.0

zlib.unzipSync(buffer[, options])#

History
  • v9.4.0: The buffer parameter can be an ArrayBuffer.
  • v8.0.0: The buffer parameter can be any TypedArray or DataView.
  • v8.0.0: The buffer parameter can be an Uint8Array now.
  • v0.11.12: Added in: v0.11.12

Decompress a chunk of data with Unzip.

zlib.zstdCompress(buffer[, options], callback)#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

zlib.zstdCompressSync(buffer[, options])#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Compress a chunk of data with ZstdCompress.

zlib.zstdDecompress(buffer[, options], callback)#

Added in: v23.8.0, v22.15.0

zlib.zstdDecompressSync(buffer[, options])#

Stability: 1 - Experimental

Added in: v23.8.0, v22.15.0

Decompress a chunk of data with ZstdDecompress.