var fs = require("fs");
var zlib = require("zlib");
var fd_slicer = require("fd-slicer");
var util = require("util");
var EventEmitter = require("events").EventEmitter;
var Transform = require("stream").Transform;
var PassThrough = require("stream").PassThrough;
var Writable = require("stream").Writable;

exports.open = open;
exports.fromFd = fromFd;
exports.fromBuffer = fromBuffer;
exports.fromRandomAccessReader = fromRandomAccessReader;
exports.dosDateTimeToDate = dosDateTimeToDate;
exports.ZipFile = ZipFile;
exports.Entry = Entry;
exports.RandomAccessReader = RandomAccessReader;

function open(path, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  if (options.autoClose == null) options.autoClose = true;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (callback == null) callback = defaultCallback;
  fs.open(path, "r", function(err, fd) {
    if (err) return callback(err);
    fromFd(fd, options, function(err, zipfile) {
      if (err) fs.close(fd, defaultCallback);
      callback(err, zipfile);
    });
  });
}
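
// Example usage (an illustrative sketch, not part of the library): open a zip
// file and log every entry. "example.zip" is a placeholder path; open(), the
// "entry"/"end" events, and the autoClose option are the API defined in this file.
//
//   open("example.zip", {autoClose: true}, function(err, zipfile) {
//     if (err) throw err;
//     zipfile.on("entry", function(entry) {
//       console.log(entry.fileName, entry.uncompressedSize);
//     });
//     zipfile.on("end", function() {
//       console.log("done reading entries");
//     });
//   });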

function fromFd(fd, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  if (options.autoClose == null) options.autoClose = false;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (callback == null) callback = defaultCallback;
  fs.fstat(fd, function(err, stats) {
    if (err) return callback(err);
    var reader = fd_slicer.createFromFd(fd, {autoClose: true});
    fromRandomAccessReader(reader, stats.size, options, callback);
  });
}

function fromBuffer(buffer, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  options.autoClose = false;
  if (options.lazyEntries == null) options.lazyEntries = false;
  // i got your open file right here.
  var reader = fd_slicer.createFromBuffer(buffer);
  fromRandomAccessReader(reader, buffer.length, options, callback);
}
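
// Sketch of fromBuffer() (illustrative, not part of the library): any Buffer
// holding a complete zip archive works, e.g. one produced by fs.readFileSync.
// "archive.zip" is a placeholder path.
//
//   fromBuffer(fs.readFileSync("archive.zip"), {lazyEntries: true}, function(err, zipfile) {
//     if (err) throw err;
//     zipfile.readEntry(); // lazyEntries: the caller drives iteration
//   });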

function fromRandomAccessReader(reader, totalSize, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  if (options.autoClose == null) options.autoClose = true;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (callback == null) callback = defaultCallback;
  if (typeof totalSize !== "number") throw new Error("expected totalSize parameter to be a number");
  if (totalSize > Number.MAX_SAFE_INTEGER) {
|  | throw new Error("zip file too large. only file sizes up to 2^52 are supported due to JavaScript's Number type being an IEEE 754 double."); | 
  }

  // the matching unref() call is in zipfile.close()
  reader.ref();

  // eocdr means End of Central Directory Record.
  // search backwards for the eocdr signature.
  // the last field of the eocdr is a variable-length comment.
  // the comment size is encoded in a 2-byte field in the eocdr, which we can't locate without trudging backwards through the comment itself.
  // as a consequence of this design decision, the metadata is ambiguous if a coherent eocdr happens to appear inside the comment.
  // we search backwards for an eocdr signature, and hope that whoever made the zip file was smart enough to forbid the eocdr signature in the comment.
  var eocdrWithoutCommentSize = 22;
  var maxCommentSize = 0x10000; // 2-byte size
  var bufferSize = Math.min(eocdrWithoutCommentSize + maxCommentSize, totalSize);
  var buffer = new Buffer(bufferSize);
  var bufferReadStart = totalSize - buffer.length;
  readAndAssertNoEof(reader, buffer, 0, bufferSize, bufferReadStart, function(err) {
    if (err) return callback(err);
    for (var i = bufferSize - eocdrWithoutCommentSize; i >= 0; i -= 1) {
      if (buffer.readUInt32LE(i) !== 0x06054b50) continue;
      // found eocdr
      var eocdrBuffer = buffer.slice(i);

      // 0 - End of central directory signature = 0x06054b50
      // 4 - Number of this disk
      var diskNumber = eocdrBuffer.readUInt16LE(4);
      if (diskNumber !== 0) return callback(new Error("multi-disk zip files are not supported: found disk number: " + diskNumber));
      // 6 - Disk where central directory starts
      // 8 - Number of central directory records on this disk
      // 10 - Total number of central directory records
      var entryCount = eocdrBuffer.readUInt16LE(10);
      // 12 - Size of central directory (bytes)
      // 16 - Offset of start of central directory, relative to start of archive
      var centralDirectoryOffset = eocdrBuffer.readUInt32LE(16);
      // 20 - Comment length
      var commentLength = eocdrBuffer.readUInt16LE(20);
      var expectedCommentLength = eocdrBuffer.length - eocdrWithoutCommentSize;
      if (commentLength !== expectedCommentLength) {
        return callback(new Error("invalid comment length. expected: " + expectedCommentLength + ". found: " + commentLength));
      }
      // 22 - Comment
      // the encoding is always cp437.
      var comment = bufferToString(eocdrBuffer, 22, eocdrBuffer.length, false);

      if (!(entryCount === 0xffff || centralDirectoryOffset === 0xffffffff)) {
        return callback(null, new ZipFile(reader, centralDirectoryOffset, totalSize, entryCount, comment, options.autoClose, options.lazyEntries));
      }

      // ZIP64 format

      // ZIP64 End of Central Directory Locator
      var zip64EocdlBuffer = new Buffer(20);
      var zip64EocdlOffset = bufferReadStart + i - zip64EocdlBuffer.length;
      readAndAssertNoEof(reader, zip64EocdlBuffer, 0, zip64EocdlBuffer.length, zip64EocdlOffset, function(err) {
        if (err) return callback(err);

        // 0 - zip64 end of central dir locator signature = 0x07064b50
        if (zip64EocdlBuffer.readUInt32LE(0) !== 0x07064b50) {
          return callback(new Error("invalid ZIP64 End of Central Directory Locator signature"));
        }
        // 4 - number of the disk with the start of the zip64 end of central directory
        // 8 - relative offset of the zip64 end of central directory record
        var zip64EocdrOffset = readUInt64LE(zip64EocdlBuffer, 8);
        // 16 - total number of disks

        // ZIP64 end of central directory record
        var zip64EocdrBuffer = new Buffer(56);
        readAndAssertNoEof(reader, zip64EocdrBuffer, 0, zip64EocdrBuffer.length, zip64EocdrOffset, function(err) {
          if (err) return callback(err);

          // 0 - zip64 end of central dir signature                           4 bytes  (0x06064b50)
          if (zip64EocdrBuffer.readUInt32LE(0) !== 0x06064b50) return callback(new Error("invalid ZIP64 end of central directory record signature"));
          // 4 - size of zip64 end of central directory record                8 bytes
          // 12 - version made by                                             2 bytes
          // 14 - version needed to extract                                   2 bytes
          // 16 - number of this disk                                         4 bytes
          // 20 - number of the disk with the start of the central directory  4 bytes
          // 24 - total number of entries in the central directory on this disk  8 bytes
          // 32 - total number of entries in the central directory            8 bytes
          entryCount = readUInt64LE(zip64EocdrBuffer, 32);
          // 40 - size of the central directory                               8 bytes
          // 48 - offset of start of central directory with respect to the starting disk number  8 bytes
          centralDirectoryOffset = readUInt64LE(zip64EocdrBuffer, 48);
          // 56 - zip64 extensible data sector                                (variable size)
          return callback(null, new ZipFile(reader, centralDirectoryOffset, totalSize, entryCount, comment, options.autoClose, options.lazyEntries));
        });
      });
      return;
    }
    callback(new Error("end of central directory record signature not found"));
  });
}

util.inherits(ZipFile, EventEmitter);
function ZipFile(reader, centralDirectoryOffset, fileSize, entryCount, comment, autoClose, lazyEntries) {
  var self = this;
  EventEmitter.call(self);
  self.reader = reader;
  // forward close events
  self.reader.on("error", function(err) {
    // error closing the fd
    emitError(self, err);
  });
  self.reader.once("close", function() {
    self.emit("close");
  });
  self.readEntryCursor = centralDirectoryOffset;
  self.fileSize = fileSize;
  self.entryCount = entryCount;
  self.comment = comment;
  self.entriesRead = 0;
  self.autoClose = !!autoClose;
  self.lazyEntries = !!lazyEntries;
  self.isOpen = true;
  self.emittedError = false;

  if (!self.lazyEntries) self.readEntry();
}
ZipFile.prototype.close = function() {
  if (!this.isOpen) return;
  this.isOpen = false;
  this.reader.unref();
};

function emitErrorAndAutoClose(self, err) {
  if (self.autoClose) self.close();
  emitError(self, err);
}
function emitError(self, err) {
  if (self.emittedError) return;
  self.emittedError = true;
  self.emit("error", err);
}

ZipFile.prototype.readEntry = function() {
  var self = this;
  if (self.entryCount === self.entriesRead) {
    // done with metadata
    setImmediate(function() {
      if (self.autoClose) self.close();
      if (self.emittedError) return;
      self.emit("end");
    });
    return;
  }
  if (self.emittedError) return;
  var buffer = new Buffer(46);
  readAndAssertNoEof(self.reader, buffer, 0, buffer.length, self.readEntryCursor, function(err) {
    if (err) return emitErrorAndAutoClose(self, err);
    if (self.emittedError) return;
    var entry = new Entry();
    // 0 - Central directory file header signature
    var signature = buffer.readUInt32LE(0);
    if (signature !== 0x02014b50) return emitErrorAndAutoClose(self, new Error("invalid central directory file header signature: 0x" + signature.toString(16)));
    // 4 - Version made by
    entry.versionMadeBy = buffer.readUInt16LE(4);
    // 6 - Version needed to extract (minimum)
    entry.versionNeededToExtract = buffer.readUInt16LE(6);
    // 8 - General purpose bit flag
    entry.generalPurposeBitFlag = buffer.readUInt16LE(8);
    // 10 - Compression method
    entry.compressionMethod = buffer.readUInt16LE(10);
    // 12 - File last modification time
    entry.lastModFileTime = buffer.readUInt16LE(12);
    // 14 - File last modification date
    entry.lastModFileDate = buffer.readUInt16LE(14);
    // 16 - CRC-32
    entry.crc32 = buffer.readUInt32LE(16);
    // 20 - Compressed size
    entry.compressedSize = buffer.readUInt32LE(20);
    // 24 - Uncompressed size
    entry.uncompressedSize = buffer.readUInt32LE(24);
    // 28 - File name length (n)
    entry.fileNameLength = buffer.readUInt16LE(28);
    // 30 - Extra field length (m)
    entry.extraFieldLength = buffer.readUInt16LE(30);
    // 32 - File comment length (k)
    entry.fileCommentLength = buffer.readUInt16LE(32);
    // 34 - Disk number where file starts
    // 36 - Internal file attributes
    entry.internalFileAttributes = buffer.readUInt16LE(36);
    // 38 - External file attributes
    entry.externalFileAttributes = buffer.readUInt32LE(38);
    // 42 - Relative offset of local file header
    entry.relativeOffsetOfLocalHeader = buffer.readUInt32LE(42);

    self.readEntryCursor += 46;

    buffer = new Buffer(entry.fileNameLength + entry.extraFieldLength + entry.fileCommentLength);
    readAndAssertNoEof(self.reader, buffer, 0, buffer.length, self.readEntryCursor, function(err) {
      if (err) return emitErrorAndAutoClose(self, err);
      if (self.emittedError) return;
      // 46 - File name
      var isUtf8 = (entry.generalPurposeBitFlag & 0x800) !== 0;
      try {
        entry.fileName = bufferToString(buffer, 0, entry.fileNameLength, isUtf8);
      } catch (e) {
        return emitErrorAndAutoClose(self, e);
      }

      // 46+n - Extra field
      var fileCommentStart = entry.fileNameLength + entry.extraFieldLength;
      var extraFieldBuffer = buffer.slice(entry.fileNameLength, fileCommentStart);
      entry.extraFields = [];
      var i = 0;
      while (i < extraFieldBuffer.length) {
        var headerId = extraFieldBuffer.readUInt16LE(i + 0);
        var dataSize = extraFieldBuffer.readUInt16LE(i + 2);
        var dataStart = i + 4;
        var dataEnd = dataStart + dataSize;
        var dataBuffer = new Buffer(dataSize);
        extraFieldBuffer.copy(dataBuffer, 0, dataStart, dataEnd);
        entry.extraFields.push({
          id: headerId,
          data: dataBuffer,
        });
        i = dataEnd;
      }

      // 46+n+m - File comment
      try {
        entry.fileComment = bufferToString(buffer, fileCommentStart, fileCommentStart + entry.fileCommentLength, isUtf8);
      } catch (e) {
        return emitErrorAndAutoClose(self, e);
      }

      self.readEntryCursor += buffer.length;
      self.entriesRead += 1;

      if (entry.uncompressedSize            === 0xffffffff ||
          entry.compressedSize              === 0xffffffff ||
          entry.relativeOffsetOfLocalHeader === 0xffffffff) {
        // ZIP64 format
        // find the Zip64 Extended Information Extra Field
        var zip64EiefBuffer = null;
        for (var i = 0; i < entry.extraFields.length; i++) {
          var extraField = entry.extraFields[i];
          if (extraField.id === 0x0001) {
            zip64EiefBuffer = extraField.data;
            break;
          }
        }
        if (zip64EiefBuffer == null) return emitErrorAndAutoClose(self, new Error("expected Zip64 Extended Information Extra Field"));
        var index = 0;
        // 0 - Original Size          8 bytes
        if (entry.uncompressedSize === 0xffffffff) {
          if (index + 8 > zip64EiefBuffer.length) return emitErrorAndAutoClose(self, new Error("Zip64 Extended Information Extra Field does not include Original Size"));
          entry.uncompressedSize = readUInt64LE(zip64EiefBuffer, index);
          index += 8;
        }
        // 8 - Compressed Size        8 bytes
        if (entry.compressedSize === 0xffffffff) {
          if (index + 8 > zip64EiefBuffer.length) return emitErrorAndAutoClose(self, new Error("Zip64 Extended Information Extra Field does not include Compressed Size"));
          entry.compressedSize = readUInt64LE(zip64EiefBuffer, index);
          index += 8;
        }
        // 16 - Relative Header Offset 8 bytes
        if (entry.relativeOffsetOfLocalHeader === 0xffffffff) {
          if (index + 8 > zip64EiefBuffer.length) return emitErrorAndAutoClose(self, new Error("Zip64 Extended Information Extra Field does not include Relative Header Offset"));
          entry.relativeOffsetOfLocalHeader = readUInt64LE(zip64EiefBuffer, index);
          index += 8;
        }
        // 24 - Disk Start Number      4 bytes
      }

      // validate file size
      if (entry.compressionMethod === 0) {
        if (entry.compressedSize !== entry.uncompressedSize) {
          var msg = "compressed/uncompressed size mismatch for stored file: " + entry.compressedSize + " != " + entry.uncompressedSize;
          return emitErrorAndAutoClose(self, new Error(msg));
        }
      }

      // validate file name
      if (entry.fileName.indexOf("\\") !== -1) return emitErrorAndAutoClose(self, new Error("invalid characters in fileName: " + entry.fileName));
      if (/^[a-zA-Z]:/.test(entry.fileName) || /^\//.test(entry.fileName)) return emitErrorAndAutoClose(self, new Error("absolute path: " + entry.fileName));
      if (entry.fileName.split("/").indexOf("..") !== -1) return emitErrorAndAutoClose(self, new Error("invalid relative path: " + entry.fileName));
      self.emit("entry", entry);

      if (!self.lazyEntries) self.readEntry();
    });
  });
};
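
// Sketch of lazy iteration (illustrative, not part of the library): with
// lazyEntries enabled the ZipFile never reads ahead on its own, so the caller
// requests each central directory record by calling readEntry() again after
// handling the previous "entry" event. "zipfile" is assumed to come from one
// of the open functions above.
//
//   zipfile.readEntry();
//   zipfile.on("entry", function(entry) {
//     // ... process entry ...
//     zipfile.readEntry(); // ask for the next entry
//   });
//   zipfile.on("end", function() {
//     // all central directory records have been read
//   });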

ZipFile.prototype.openReadStream = function(entry, callback) {
  var self = this;
  if (!self.isOpen) return callback(new Error("closed"));
  // make sure we don't lose the fd before we open the actual read stream
  self.reader.ref();
  var buffer = new Buffer(30);
  readAndAssertNoEof(self.reader, buffer, 0, buffer.length, entry.relativeOffsetOfLocalHeader, function(err) {
    try {
      if (err) return callback(err);
      // 0 - Local file header signature = 0x04034b50
      var signature = buffer.readUInt32LE(0);
      if (signature !== 0x04034b50) return callback(new Error("invalid local file header signature: 0x" + signature.toString(16)));
      // all this should be redundant
      // 4 - Version needed to extract (minimum)
      // 6 - General purpose bit flag
      // 8 - Compression method
      // 10 - File last modification time
      // 12 - File last modification date
      // 14 - CRC-32
      // 18 - Compressed size
      // 22 - Uncompressed size
      // 26 - File name length (n)
      var fileNameLength = buffer.readUInt16LE(26);
      // 28 - Extra field length (m)
      var extraFieldLength = buffer.readUInt16LE(28);
      // 30 - File name
      // 30+n - Extra field
      var localFileHeaderEnd = entry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
      var compressed;
      if (entry.compressionMethod === 0) {
        // 0 - The file is stored (no compression)
        compressed = false;
      } else if (entry.compressionMethod === 8) {
        // 8 - The file is Deflated
        compressed = true;
      } else {
        return callback(new Error("unsupported compression method: " + entry.compressionMethod));
      }
      var fileDataStart = localFileHeaderEnd;
      var fileDataEnd = fileDataStart + entry.compressedSize;
      if (entry.compressedSize !== 0) {
        // bounds check now, because the read streams will probably not complain loud enough.
        // since we're dealing with an unsigned offset plus an unsigned size,
        // we only have 1 thing to check for.
        if (fileDataEnd > self.fileSize) {
          return callback(new Error("file data overflows file bounds: " +
              fileDataStart + " + " + entry.compressedSize + " > " + self.fileSize));
        }
      }
      var readStream = self.reader.createReadStream({start: fileDataStart, end: fileDataEnd});
      var endpointStream = readStream;
      if (compressed) {
        var destroyed = false;
        var inflateFilter = zlib.createInflateRaw();
        readStream.on("error", function(err) {
          // setImmediate here because errors can be emitted during the first call to pipe()
          setImmediate(function() {
            if (!destroyed) inflateFilter.emit("error", err);
          });
        });

        var checkerStream = new AssertByteCountStream(entry.uncompressedSize);
        inflateFilter.on("error", function(err) {
          // forward zlib errors to the client-visible stream
          setImmediate(function() {
            if (!destroyed) checkerStream.emit("error", err);
          });
        });
        checkerStream.destroy = function() {
          destroyed = true;
          inflateFilter.unpipe(checkerStream);
          readStream.unpipe(inflateFilter);
          // TODO: the inflateFilter now causes a memory leak. see Issue #27.
          readStream.destroy();
        };
        endpointStream = readStream.pipe(inflateFilter).pipe(checkerStream);
      }
      callback(null, endpointStream);
    } finally {
      self.reader.unref();
    }
  });
};
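
// Sketch of openReadStream() (illustrative, not part of the library): the
// callback delivers a readable stream of the entry's uncompressed data, which
// can be piped anywhere. "out.txt" is a placeholder destination, and "zipfile"
// and "entry" are assumed to come from the API above.
//
//   zipfile.openReadStream(entry, function(err, readStream) {
//     if (err) throw err;
//     readStream.pipe(fs.createWriteStream("out.txt"));
//   });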

function Entry() {
}
Entry.prototype.getLastModDate = function() {
  return dosDateTimeToDate(this.lastModFileDate, this.lastModFileTime);
};

function dosDateTimeToDate(date, time) {
  var day = date & 0x1f; // 1-31
  var month = (date >> 5 & 0xf) - 1; // raw 1-12, maps to 0-11
  var year = (date >> 9 & 0x7f) + 1980; // raw 0-127, maps to 1980-2107

  var millisecond = 0;
  var second = (time & 0x1f) * 2; // raw 0-29, maps to 0-58 (even numbers)
  var minute = time >> 5 & 0x3f; // 0-59
  var hour = time >> 11 & 0x1f; // 0-23

  return new Date(year, month, day, hour, minute, second, millisecond);
}
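
// Worked example (illustrative): lastModFileDate 0x4A6E has day = 14 (bits 0-4),
// month = 3 (bits 5-8), and year = 37 + 1980 = 2017 (bits 9-15); lastModFileTime
// 0x6BC5 has seconds = 5 * 2 = 10 (bits 0-4), minutes = 30 (bits 5-10), and
// hours = 13 (bits 11-15). So dosDateTimeToDate(0x4A6E, 0x6BC5) is
// 2017-03-14 13:30:10 in local time.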

function readAndAssertNoEof(reader, buffer, offset, length, position, callback) {
  if (length === 0) {
    // fs.read will throw an out-of-bounds error if you try to read 0 bytes from a 0 byte file
    return setImmediate(function() { callback(null, new Buffer(0)); });
  }
  reader.read(buffer, offset, length, position, function(err, bytesRead) {
    if (err) return callback(err);
    if (bytesRead < length) return callback(new Error("unexpected EOF"));
    callback();
  });
}

util.inherits(AssertByteCountStream, Transform);
function AssertByteCountStream(byteCount) {
  Transform.call(this);
  this.actualByteCount = 0;
  this.expectedByteCount = byteCount;
}
AssertByteCountStream.prototype._transform = function(chunk, encoding, cb) {
  this.actualByteCount += chunk.length;
  if (this.actualByteCount > this.expectedByteCount) {
    var msg = "too many bytes in the stream. expected " + this.expectedByteCount + ". got at least " + this.actualByteCount;
    return cb(new Error(msg));
  }
  cb(null, chunk);
};
AssertByteCountStream.prototype._flush = function(cb) {
  if (this.actualByteCount < this.expectedByteCount) {
    var msg = "not enough bytes in the stream. expected " + this.expectedByteCount + ". got only " + this.actualByteCount;
    return cb(new Error(msg));
  }
  cb();
};

util.inherits(RandomAccessReader, EventEmitter);
function RandomAccessReader() {
  EventEmitter.call(this);
  this.refCount = 0;
}
RandomAccessReader.prototype.ref = function() {
  this.refCount += 1;
};
RandomAccessReader.prototype.unref = function() {
  var self = this;
  self.refCount -= 1;

  if (self.refCount > 0) return;
  if (self.refCount < 0) throw new Error("invalid unref");

  self.close(onCloseDone);

  function onCloseDone(err) {
    if (err) return self.emit('error', err);
    self.emit('close');
  }
};
RandomAccessReader.prototype.createReadStream = function(options) {
  var start = options.start;
  var end = options.end;
  if (start === end) {
    var emptyStream = new PassThrough();
    setImmediate(function() {
      emptyStream.end();
    });
    return emptyStream;
  }
  var stream = this._readStreamForRange(start, end);

  var destroyed = false;
  var refUnrefFilter = new RefUnrefFilter(this);
  stream.on("error", function(err) {
    setImmediate(function() {
      if (!destroyed) refUnrefFilter.emit("error", err);
    });
  });
  refUnrefFilter.destroy = function() {
    stream.unpipe(refUnrefFilter);
    refUnrefFilter.unref();
    stream.destroy();
  };

  var byteCounter = new AssertByteCountStream(end - start);
  refUnrefFilter.on("error", function(err) {
    setImmediate(function() {
      if (!destroyed) byteCounter.emit("error", err);
    });
  });
  byteCounter.destroy = function() {
    destroyed = true;
    refUnrefFilter.unpipe(byteCounter);
    refUnrefFilter.destroy();
  };

  return stream.pipe(refUnrefFilter).pipe(byteCounter);
};
RandomAccessReader.prototype._readStreamForRange = function(start, end) {
  throw new Error("not implemented");
};
RandomAccessReader.prototype.read = function(buffer, offset, length, position, callback) {
  var readStream = this.createReadStream({start: position, end: position + length});
  var writeStream = new Writable();
  var written = 0;
  writeStream._write = function(chunk, encoding, cb) {
    chunk.copy(buffer, offset + written, 0, chunk.length);
    written += chunk.length;
    cb();
  };
  writeStream.on("finish", callback);
  readStream.on("error", function(error) {
    callback(error);
  });
  readStream.pipe(writeStream);
};
RandomAccessReader.prototype.close = function(callback) {
  setImmediate(callback);
};
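
// Sketch of a custom RandomAccessReader subclass (illustrative, not part of
// the library). The only required override is _readStreamForRange(start, end),
// where end is exclusive; ref()/unref() bookkeeping and the read() fallback
// come from the base class. The fd handling below is a simplifying assumption,
// not how fd-slicer actually does it.
//
//   util.inherits(FdReader, RandomAccessReader);
//   function FdReader(fd) {
//     RandomAccessReader.call(this);
//     this.fd = fd;
//   }
//   FdReader.prototype._readStreamForRange = function(start, end) {
//     // fs.createReadStream treats "end" as inclusive, hence the - 1
//     return fs.createReadStream(null, {fd: this.fd, autoClose: false, start: start, end: end - 1});
//   };
//   FdReader.prototype.close = function(callback) {
//     fs.close(this.fd, callback);
//   };
//   // then: fromRandomAccessReader(new FdReader(fd), totalSize, options, callback)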

util.inherits(RefUnrefFilter, PassThrough);
function RefUnrefFilter(context) {
  PassThrough.call(this);
  this.context = context;
  this.context.ref();
  this.unreffedYet = false;
}
RefUnrefFilter.prototype._flush = function(cb) {
  this.unref();
  cb();
};
RefUnrefFilter.prototype.unref = function(cb) {
  if (this.unreffedYet) return;
  this.unreffedYet = true;
  this.context.unref();
};

var cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
function bufferToString(buffer, start, end, isUtf8) {
  if (isUtf8) {
    return buffer.toString("utf8", start, end);
  } else {
    var result = "";
    for (var i = start; i < end; i++) {
      result += cp437[buffer[i]];
    }
    return result;
  }
}
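
// Worked example (illustrative): with isUtf8 false, each byte indexes into the
// cp437 table above, so the bytes [0x48, 0x69, 0x82] decode to "Hié" (0x82 is
// "é" in code page 437), whereas utf8 decoding would turn the lone 0x82 byte
// into a replacement character.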

function readUInt64LE(buffer, offset) {
  // there is no native function for this, because we can't actually store 64-bit integers precisely.
  // after 53 bits, JavaScript's Number type (IEEE 754 double) can't store individual integers anymore.
  // but since 53 bits is a whole lot more than 32 bits, we do our best anyway.
  var lower32 = buffer.readUInt32LE(offset);
  var upper32 = buffer.readUInt32LE(offset + 4);
  // we can't use bitshifting here, because JavaScript bitshifting only works on 32-bit integers.
  return upper32 * 0x100000000 + lower32;
  // as long as we're bounds checking the result of this function against the total file size,
  // we'll catch any overflow errors, because we already made sure the total file size was within reason.
}
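
// Worked example (illustrative): for the little-endian bytes
// 01 00 00 00 02 00 00 00, lower32 is 1 and upper32 is 2, so the result is
// 2 * 0x100000000 + 1 = 8589934593. Values up to 2^53 keep full precision;
// beyond that the low bits can be lost, which the bounds checks mentioned
// above are meant to catch.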

function defaultCallback(err) {
  if (err) throw err;
}