,\n })\n\n this.pointers.splice(pointerIndex, 1)\n this.pointerIsDown = false\n }\n\n _updateLatestPointer (pointer: PointerType, event: PointerEventType, eventTarget: Node) {\n this._latestPointer.pointer = pointer\n this._latestPointer.event = event\n this._latestPointer.eventTarget = eventTarget\n }\n\n destroy () {\n this._latestPointer.pointer = null\n this._latestPointer.event = null\n this._latestPointer.eventTarget = null\n }\n\n _createPreparedEvent (\n event: PointerEventType,\n phase: P,\n preEnd?: boolean,\n type?: string,\n ) {\n return new InteractEvent(this, event, this.prepared.name, phase, this.element, preEnd, type)\n }\n\n _fireEvent (iEvent: InteractEvent) {\n this.interactable.fire(iEvent)\n\n if (!this.prevEvent || iEvent.timeStamp >= this.prevEvent.timeStamp) {\n this.prevEvent = iEvent\n }\n }\n\n _doPhase (\n signalArg: Omit, 'iEvent'> & { iEvent?: InteractEvent },\n ) {\n const { event, phase, preEnd, type } = signalArg\n const { rect } = this\n\n if (rect && phase === 'move') {\n // update the rect changes due to pointer move\n rectUtils.addEdges(this.edges, rect, this.coords.delta[this.interactable.options.deltaSource])\n\n rect.width = rect.right - rect.left\n rect.height = rect.bottom - rect.top\n }\n\n const beforeResult = this._scopeFire(`interactions:before-action-${phase}` as any, signalArg)\n\n if (beforeResult === false) {\n return false\n }\n\n const iEvent = (signalArg.iEvent = this._createPreparedEvent(event, phase, preEnd, type))\n\n this._scopeFire(`interactions:action-${phase}` as any, signalArg)\n\n if (phase === 'start') {\n this.prevEvent = iEvent\n }\n\n this._fireEvent(iEvent)\n\n this._scopeFire(`interactions:after-action-${phase}` as any, signalArg)\n\n return true\n }\n\n _now () {\n return Date.now()\n }\n}\n\nexport default Interaction\nexport { PointerInfo }\n","import type Interaction from '@interactjs/core/Interaction'\nimport { _ProxyMethods } from '@interactjs/core/Interaction'\nimport type { Plugin } from '@interactjs/core/scope'\nimport type { Point } from '@interactjs/types/index'\nimport * as rectUtils from '@interactjs/utils/rect'\n\ndeclare module '@interactjs/core/Interaction' {\n interface Interaction {\n offsetBy?: typeof offsetBy\n offset: {\n total: Point\n pending: Point\n }\n }\n\n enum _ProxyMethods {\n offsetBy = '',\n }\n}\n\n;(_ProxyMethods as any).offsetBy = ''\n\nexport function addTotal (interaction: Interaction) {\n if (!interaction.pointerIsDown) {\n return\n }\n\n addToCoords(interaction.coords.cur, interaction.offset.total)\n\n interaction.offset.pending.x = 0\n interaction.offset.pending.y = 0\n}\n\nfunction beforeAction ({ interaction }: { interaction: Interaction }) {\n applyPending(interaction)\n}\n\nfunction beforeEnd ({ interaction }: { interaction: Interaction }): boolean | void {\n const hadPending = applyPending(interaction)\n\n if (!hadPending) return\n\n interaction.move({ offset: true })\n interaction.end()\n\n return false\n}\n\nfunction end ({ interaction }: { interaction: Interaction }) {\n interaction.offset.total.x = 0\n interaction.offset.total.y = 0\n interaction.offset.pending.x = 0\n interaction.offset.pending.y = 0\n}\n\nexport function applyPending (interaction: Interaction) {\n if (!hasPending(interaction)) {\n return false\n }\n\n const { pending } = interaction.offset\n\n addToCoords(interaction.coords.cur, pending)\n addToCoords(interaction.coords.delta, pending)\n rectUtils.addEdges(interaction.edges, interaction.rect, pending)\n\n pending.x = 0\n pending.y = 0\n\n return 
true\n}\n\nfunction offsetBy (this: Interaction, { x, y }: Point) {\n this.offset.pending.x += x\n this.offset.pending.y += y\n\n this.offset.total.x += x\n this.offset.total.y += y\n}\n\nfunction addToCoords ({ page, client }, { x, y }: Point) {\n page.x += x\n page.y += y\n client.x += x\n client.y += y\n}\n\nfunction hasPending (interaction: Interaction) {\n return !!(interaction.offset.pending.x || interaction.offset.pending.y)\n}\n\nconst offset: Plugin = {\n id: 'offset',\n before: ['modifiers', 'pointer-events', 'actions', 'inertia'],\n install (scope) {\n scope.Interaction.prototype.offsetBy = offsetBy\n },\n listeners: {\n 'interactions:new': ({ interaction }) => {\n interaction.offset = {\n total: { x: 0, y: 0 },\n pending: { x: 0, y: 0 },\n }\n },\n 'interactions:update-pointer': ({ interaction }) => addTotal(interaction),\n 'interactions:before-action-start': beforeAction,\n 'interactions:before-action-move': beforeAction,\n 'interactions:before-action-end': beforeEnd,\n 'interactions:stop': end,\n },\n}\n\nexport default offset\n","import type { Interaction, DoPhaseArg } from '@interactjs/core/Interaction'\nimport type { ActionName, Scope, SignalArgs, Plugin } from '@interactjs/core/scope'\nimport Modification from '@interactjs/modifiers/Modification'\nimport * as modifiers from '@interactjs/modifiers/base'\nimport offset from '@interactjs/offset/plugin'\nimport type { Point, PointerEventType } from '@interactjs/types/index'\nimport * as dom from '@interactjs/utils/domUtils'\nimport hypot from '@interactjs/utils/hypot'\nimport is from '@interactjs/utils/is'\nimport { copyCoords } from '@interactjs/utils/pointerUtils'\nimport raf from '@interactjs/utils/raf'\n\ndeclare module '@interactjs/core/InteractEvent' {\n interface PhaseMap {\n resume?: true\n inertiastart?: true\n }\n}\n\ndeclare module '@interactjs/core/Interaction' {\n interface Interaction {\n inertia?: InertiaState\n }\n}\n\ndeclare module '@interactjs/core/options' {\n interface PerActionDefaults {\n inertia?: {\n enabled?: boolean\n resistance?: number // the lambda in exponential decay\n minSpeed?: number // target speed must be above this for inertia to start\n endSpeed?: number // the speed at which inertia is slow enough to stop\n allowResume?: true // allow resuming an action in inertia phase\n smoothEndDuration?: number // animate to snap/restrict endOnly if there's no inertia\n }\n }\n}\n\ndeclare module '@interactjs/core/scope' {\n interface SignalArgs {\n 'interactions:before-action-inertiastart': Omit, 'iEvent'>\n 'interactions:action-inertiastart': DoPhaseArg\n 'interactions:after-action-inertiastart': DoPhaseArg\n 'interactions:before-action-resume': Omit, 'iEvent'>\n 'interactions:action-resume': DoPhaseArg\n 'interactions:after-action-resume': DoPhaseArg\n }\n}\n\nfunction install (scope: Scope) {\n const { defaults } = scope\n\n scope.usePlugin(offset)\n scope.usePlugin(modifiers.default)\n scope.actions.phases.inertiastart = true\n scope.actions.phases.resume = true\n\n defaults.perAction.inertia = {\n enabled: false,\n resistance: 10, // the lambda in exponential decay\n minSpeed: 100, // target speed must be above this for inertia to start\n endSpeed: 10, // the speed at which inertia is slow enough to stop\n allowResume: true, // allow resuming an action in inertia phase\n smoothEndDuration: 300, // animate to snap/restrict endOnly if there's no inertia\n }\n}\n\nexport class InertiaState {\n active = false\n isModified = false\n smoothEnd = false\n allowResume = false\n\n modification!: 
Modification\n modifierCount = 0\n modifierArg!: modifiers.ModifierArg\n\n startCoords!: Point\n t0 = 0\n v0 = 0\n\n te = 0\n targetOffset!: Point\n modifiedOffset!: Point\n currentOffset!: Point\n\n lambda_v0? = 0 // eslint-disable-line camelcase\n one_ve_v0? = 0 // eslint-disable-line camelcase\n timeout!: number\n readonly interaction: Interaction\n\n constructor (interaction: Interaction) {\n this.interaction = interaction\n }\n\n start (event: PointerEventType) {\n const { interaction } = this\n const options = getOptions(interaction)\n\n if (!options || !options.enabled) {\n return false\n }\n\n const { client: velocityClient } = interaction.coords.velocity\n const pointerSpeed = hypot(velocityClient.x, velocityClient.y)\n const modification = this.modification || (this.modification = new Modification(interaction))\n\n modification.copyFrom(interaction.modification)\n\n this.t0 = interaction._now()\n this.allowResume = options.allowResume\n this.v0 = pointerSpeed\n this.currentOffset = { x: 0, y: 0 }\n this.startCoords = interaction.coords.cur.page\n\n this.modifierArg = modification.fillArg({\n pageCoords: this.startCoords,\n preEnd: true,\n phase: 'inertiastart',\n })\n\n const thrown =\n this.t0 - interaction.coords.cur.timeStamp < 50 &&\n pointerSpeed > options.minSpeed &&\n pointerSpeed > options.endSpeed\n\n if (thrown) {\n this.startInertia()\n } else {\n modification.result = modification.setAll(this.modifierArg)\n\n if (!modification.result.changed) {\n return false\n }\n\n this.startSmoothEnd()\n }\n\n // force modification change\n interaction.modification.result.rect = null\n\n // bring inertiastart event to the target coords\n interaction.offsetBy(this.targetOffset)\n interaction._doPhase({\n interaction,\n event,\n phase: 'inertiastart',\n })\n interaction.offsetBy({ x: -this.targetOffset.x, y: -this.targetOffset.y })\n // force modification change\n interaction.modification.result.rect = null\n\n this.active = true\n interaction.simulation = this\n\n return true\n }\n\n startInertia () {\n const startVelocity = this.interaction.coords.velocity.client\n const options = getOptions(this.interaction)\n const lambda = options.resistance\n const inertiaDur = -Math.log(options.endSpeed / this.v0) / lambda\n\n this.targetOffset = {\n x: (startVelocity.x - inertiaDur) / lambda,\n y: (startVelocity.y - inertiaDur) / lambda,\n }\n\n this.te = inertiaDur\n this.lambda_v0 = lambda / this.v0\n this.one_ve_v0 = 1 - options.endSpeed / this.v0\n\n const { modification, modifierArg } = this\n\n modifierArg.pageCoords = {\n x: this.startCoords.x + this.targetOffset.x,\n y: this.startCoords.y + this.targetOffset.y,\n }\n\n modification.result = modification.setAll(modifierArg)\n\n if (modification.result.changed) {\n this.isModified = true\n this.modifiedOffset = {\n x: this.targetOffset.x + modification.result.delta.x,\n y: this.targetOffset.y + modification.result.delta.y,\n }\n }\n\n this.onNextFrame(() => this.inertiaTick())\n }\n\n startSmoothEnd () {\n this.smoothEnd = true\n this.isModified = true\n this.targetOffset = {\n x: this.modification.result.delta.x,\n y: this.modification.result.delta.y,\n }\n\n this.onNextFrame(() => this.smoothEndTick())\n }\n\n onNextFrame (tickFn: () => void) {\n this.timeout = raf.request(() => {\n if (this.active) {\n tickFn()\n }\n })\n }\n\n inertiaTick () {\n const { interaction } = this\n const options = getOptions(interaction)\n const lambda = options.resistance\n const t = (interaction._now() - this.t0) / 1000\n\n if (t < this.te) {\n const 
progress = 1 - (Math.exp(-lambda * t) - this.lambda_v0) / this.one_ve_v0\n let newOffset: Point\n\n if (this.isModified) {\n newOffset = getQuadraticCurvePoint(\n 0,\n 0,\n this.targetOffset.x,\n this.targetOffset.y,\n this.modifiedOffset.x,\n this.modifiedOffset.y,\n progress,\n )\n } else {\n newOffset = {\n x: this.targetOffset.x * progress,\n y: this.targetOffset.y * progress,\n }\n }\n\n const delta = { x: newOffset.x - this.currentOffset.x, y: newOffset.y - this.currentOffset.y }\n\n this.currentOffset.x += delta.x\n this.currentOffset.y += delta.y\n\n interaction.offsetBy(delta)\n interaction.move()\n\n this.onNextFrame(() => this.inertiaTick())\n } else {\n interaction.offsetBy({\n x: this.modifiedOffset.x - this.currentOffset.x,\n y: this.modifiedOffset.y - this.currentOffset.y,\n })\n\n this.end()\n }\n }\n\n smoothEndTick () {\n const { interaction } = this\n const t = interaction._now() - this.t0\n const { smoothEndDuration: duration } = getOptions(interaction)\n\n if (t < duration) {\n const newOffset = {\n x: easeOutQuad(t, 0, this.targetOffset.x, duration),\n y: easeOutQuad(t, 0, this.targetOffset.y, duration),\n }\n const delta = {\n x: newOffset.x - this.currentOffset.x,\n y: newOffset.y - this.currentOffset.y,\n }\n\n this.currentOffset.x += delta.x\n this.currentOffset.y += delta.y\n\n interaction.offsetBy(delta)\n interaction.move({ skipModifiers: this.modifierCount })\n\n this.onNextFrame(() => this.smoothEndTick())\n } else {\n interaction.offsetBy({\n x: this.targetOffset.x - this.currentOffset.x,\n y: this.targetOffset.y - this.currentOffset.y,\n })\n\n this.end()\n }\n }\n\n resume ({ pointer, event, eventTarget }: SignalArgs['interactions:down']) {\n const { interaction } = this\n\n // undo inertia changes to interaction coords\n interaction.offsetBy({\n x: -this.currentOffset.x,\n y: -this.currentOffset.y,\n })\n\n // update pointer at pointer down position\n interaction.updatePointer(pointer, event, eventTarget, true)\n\n // fire resume signals and event\n interaction._doPhase({\n interaction,\n event,\n phase: 'resume',\n })\n copyCoords(interaction.coords.prev, interaction.coords.cur)\n\n this.stop()\n }\n\n end () {\n this.interaction.move()\n this.interaction.end()\n this.stop()\n }\n\n stop () {\n this.active = this.smoothEnd = false\n this.interaction.simulation = null\n raf.cancel(this.timeout)\n }\n}\n\nfunction start ({ interaction, event }: DoPhaseArg) {\n if (!interaction._interacting || interaction.simulation) {\n return null\n }\n\n const started = interaction.inertia.start(event)\n\n // prevent action end if inertia or smoothEnd\n return started ? 
false : null\n}\n\n// Check if the down event hits the current inertia target;\n// if so, control should be returned to the user\nfunction resume (arg: SignalArgs['interactions:down']) {\n const { interaction, eventTarget } = arg\n const state = interaction.inertia\n\n if (!state.active) return\n\n let element = eventTarget as Node\n\n // climb up the DOM tree from the event target\n while (is.element(element)) {\n // if interaction element is the current inertia target element\n if (element === interaction.element) {\n state.resume(arg)\n break\n }\n\n element = dom.parentNode(element)\n }\n}\n\nfunction stop ({ interaction }: { interaction: Interaction }) {\n const state = interaction.inertia\n\n if (state.active) {\n state.stop()\n }\n}\n\nfunction getOptions ({ interactable, prepared }: Interaction) {\n return interactable && interactable.options && prepared.name && interactable.options[prepared.name].inertia\n}\n\nconst inertia: Plugin = {\n id: 'inertia',\n before: ['modifiers', 'actions'],\n install,\n listeners: {\n 'interactions:new': ({ interaction }) => {\n interaction.inertia = new InertiaState(interaction)\n },\n\n 'interactions:before-action-end': start,\n 'interactions:down': resume,\n 'interactions:stop': stop,\n\n 'interactions:before-action-resume': (arg) => {\n const { modification } = arg.interaction\n\n modification.stop(arg)\n modification.start(arg, arg.interaction.coords.cur.page)\n modification.applyToInteraction(arg)\n },\n\n 'interactions:before-action-inertiastart': (arg) => arg.interaction.modification.setAndApply(arg),\n 'interactions:action-resume': modifiers.addEventModifiers,\n 'interactions:action-inertiastart': modifiers.addEventModifiers,\n 'interactions:after-action-inertiastart': (arg) =>\n arg.interaction.modification.restoreInteractionCoords(arg),\n 'interactions:after-action-resume': (arg) => arg.interaction.modification.restoreInteractionCoords(arg),\n },\n}\n\n// http://stackoverflow.com/a/5634528/2280888\nfunction _getQBezierValue (t: number, p1: number, p2: number, p3: number) {\n const iT = 1 - t\n return iT * iT * p1 + 2 * iT * t * p2 + t * t * p3\n}\n\nfunction getQuadraticCurvePoint (\n startX: number,\n startY: number,\n cpX: number,\n cpY: number,\n endX: number,\n endY: number,\n position: number,\n) {\n return {\n x: _getQBezierValue(position, startX, cpX, endX),\n y: _getQBezierValue(position, startY, cpY, endY),\n }\n}\n\n// http://gizma.com/easing/\nfunction easeOutQuad (t: number, b: number, c: number, d: number) {\n t /= d\n return -c * t * (t - 2) + b\n}\n\nexport default inertia\n","import type { Listener, ListenersArg, Rect } from '@interactjs/types/index'\nimport * as arr from '@interactjs/utils/arr'\nimport extend from '@interactjs/utils/extend'\nimport type { NormalizedListeners } from '@interactjs/utils/normalizeListeners'\nimport normalize from '@interactjs/utils/normalizeListeners'\n\nfunction fireUntilImmediateStopped (event: any, listeners: Listener[]) {\n for (const listener of listeners) {\n if (event.immediatePropagationStopped) {\n break\n }\n\n listener(event)\n }\n}\n\nexport class Eventable {\n options: any\n types: NormalizedListeners = {}\n propagationStopped = false\n immediatePropagationStopped = false\n global: any\n\n constructor (options?: { [index: string]: any }) {\n this.options = extend({}, options || {})\n }\n\n fire<T extends { type: string }> (event: T) {\n let listeners: Listener[]\n const global = this.global\n\n // Interactable#on() listeners\n // tslint:disable no-conditional-assignment\n if ((listeners = this.types[event.type])) {\n 
fireUntilImmediateStopped(event, listeners)\n }\n\n // interact.on() listeners\n if (!event.propagationStopped && global && (listeners = global[event.type])) {\n fireUntilImmediateStopped(event, listeners)\n }\n }\n\n on (type: string, listener: ListenersArg) {\n const listeners = normalize(type, listener)\n\n for (type in listeners) {\n this.types[type] = arr.merge(this.types[type] || [], listeners[type])\n }\n }\n\n off (type: string, listener: ListenersArg) {\n const listeners = normalize(type, listener)\n\n for (type in listeners) {\n const eventList = this.types[type]\n\n if (!eventList || !eventList.length) {\n continue\n }\n\n for (const subListener of listeners[type]) {\n const index = eventList.indexOf(subListener)\n\n if (index !== -1) {\n eventList.splice(index, 1)\n }\n }\n }\n }\n\n getRect (_element: Element): Rect {\n return null\n }\n}\n","import type { Actions } from '@interactjs/core/scope'\n\nexport default function isNonNativeEvent (type: string, actions: Actions) {\n if (actions.phaselessTypes[type]) {\n return true\n }\n\n for (const name in actions.map) {\n if (type.indexOf(name) === 0 && type.substr(name.length) in actions.phases) {\n return true\n }\n }\n\n return false\n}\n","/** @module interact */\nimport type { Scope, Plugin } from '@interactjs/core/scope'\nimport type { Context, EventTypes, Listener, ListenersArg, Target, Element } from '@interactjs/types/index'\nimport browser from '@interactjs/utils/browser'\nimport * as domUtils from '@interactjs/utils/domUtils'\nimport is from '@interactjs/utils/is'\nimport { warnOnce } from '@interactjs/utils/misc'\nimport * as pointerUtils from '@interactjs/utils/pointerUtils'\n\nimport type { Interactable } from './Interactable'\nimport isNonNativeEvent from './isNonNativeEvent'\nimport type { Options } from './options'\n\ndeclare module '@interactjs/core/InteractStatic' {\n export interface InteractStatic {\n (target: Target, options?: Options): Interactable\n getPointerAverage: typeof pointerUtils.pointerAverage\n getTouchBBox: typeof pointerUtils.touchBBox\n getTouchDistance: typeof pointerUtils.touchDistance\n getTouchAngle: typeof pointerUtils.touchAngle\n getElementRect: typeof domUtils.getElementRect\n getElementClientRect: typeof domUtils.getElementClientRect\n matchesSelector: typeof domUtils.matchesSelector\n closest: typeof domUtils.closest\n /** @internal */ globalEvents: any\n version: string\n /** @internal */ scope: Scope\n use(\n plugin: Plugin,\n options?: {\n [key: string]: any\n },\n ): any\n isSet(target: Element, options?: any): boolean\n on(type: string | EventTypes, listener: ListenersArg, options?: object): any\n off(type: EventTypes, listener: any, options?: object): any\n debug(): any\n supportsTouch(): boolean\n supportsPointerEvent(): boolean\n stop(): any\n pointerMoveTolerance(newValue?: number): any\n addDocument(doc: Document, options?: object): void\n removeDocument(doc: Document): void\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/consistent-type-imports\ntype _InteractStatic = import('@interactjs/core/InteractStatic').InteractStatic\n\nexport function createInteractStatic (scope: Scope): _InteractStatic {\n /**\n * ```js\n * interact('#draggable').draggable(true)\n *\n * var rectables = interact('rect')\n * rectables\n * .gesturable(true)\n * .on('gesturemove', function (event) {\n * // ...\n * })\n * ```\n *\n * The methods of this variable can be used to set elements as interactables\n * and also to change various default settings.\n *\n * Calling it as a function and 
passing an element or a valid CSS selector\n * string returns an Interactable object which has various methods to configure\n * it.\n *\n * @global\n *\n * @param {Element | string} target The HTML or SVG Element to interact with\n * or CSS selector\n * @return {Interactable}\n */\n const interact = ((target: Target, options: Options) => {\n let interactable = scope.interactables.get(target, options)\n\n if (!interactable) {\n interactable = scope.interactables.new(target, options)\n interactable.events.global = interact.globalEvents\n }\n\n return interactable\n }) as _InteractStatic\n\n // expose the functions used to calculate multi-touch properties\n interact.getPointerAverage = pointerUtils.pointerAverage\n interact.getTouchBBox = pointerUtils.touchBBox\n interact.getTouchDistance = pointerUtils.touchDistance\n interact.getTouchAngle = pointerUtils.touchAngle\n\n interact.getElementRect = domUtils.getElementRect\n interact.getElementClientRect = domUtils.getElementClientRect\n interact.matchesSelector = domUtils.matchesSelector\n interact.closest = domUtils.closest\n\n interact.globalEvents = {} as any\n\n // eslint-disable-next-line no-undef\n interact.version = process.env.npm_package_version\n interact.scope = scope\n /**\n * Use a plugin\n *\n * @alias module:interact.use\n *\n */\n interact.use = function (plugin, options) {\n this.scope.usePlugin(plugin, options)\n\n return this\n }\n\n /**\n * Check if an element or selector has been set with the {@link interact}\n * function\n *\n * @alias module:interact.isSet\n *\n * @param {Target} target The Element or string being searched for\n * @param {object} options\n * @return {boolean} Indicates if the element or CSS selector was previously\n * passed to interact\n */\n interact.isSet = function (target: Target, options?: { context?: Context }): boolean {\n return !!this.scope.interactables.get(target, options && options.context)\n }\n\n /**\n * @deprecated\n * Add a global listener for an InteractEvent or adds a DOM event to `document`\n *\n * @alias module:interact.on\n *\n * @param {string | array | object} type The types of events to listen for\n * @param {function} listener The function event (s)\n * @param {object | boolean} [options] object or useCapture flag for\n * addEventListener\n * @return {object} interact\n */\n interact.on = warnOnce(function on (type: string | EventTypes, listener: ListenersArg, options?: object) {\n if (is.string(type) && type.search(' ') !== -1) {\n type = type.trim().split(/ +/)\n }\n\n if (is.array(type)) {\n for (const eventType of type as any[]) {\n this.on(eventType, listener, options)\n }\n\n return this\n }\n\n if (is.object(type)) {\n for (const prop in type) {\n this.on(prop, (type as any)[prop], listener)\n }\n\n return this\n }\n\n // if it is an InteractEvent type, add listener to globalEvents\n if (isNonNativeEvent(type, this.scope.actions)) {\n // if this type of event was never bound\n if (!this.globalEvents[type]) {\n this.globalEvents[type] = [listener]\n } else {\n this.globalEvents[type].push(listener)\n }\n }\n // If non InteractEvent type, addEventListener to document\n else {\n this.scope.events.add(this.scope.document, type, listener as Listener, { options })\n }\n\n return this\n }, 'The interact.on() method is being deprecated')\n\n /**\n * @deprecated\n * Removes a global InteractEvent listener or DOM event from `document`\n *\n * @alias module:interact.off\n *\n * @param {string | array | object} type The types of events that were listened\n * for\n * @param {function} 
listener The listener function to be removed\n * @param {object | boolean} options [options] object or useCapture flag for\n * removeEventListener\n * @return {object} interact\n */\n interact.off = warnOnce(function off (type: EventTypes, listener: any, options?: object) {\n if (is.string(type) && type.search(' ') !== -1) {\n type = type.trim().split(/ +/)\n }\n\n if (is.array(type)) {\n for (const eventType of type) {\n this.off(eventType, listener, options)\n }\n\n return this\n }\n\n if (is.object(type)) {\n for (const prop in type) {\n this.off(prop, type[prop], listener)\n }\n\n return this\n }\n\n if (isNonNativeEvent(type, this.scope.actions)) {\n let index: number\n\n if (type in this.globalEvents && (index = this.globalEvents[type].indexOf(listener)) !== -1) {\n this.globalEvents[type].splice(index, 1)\n }\n } else {\n this.scope.events.remove(this.scope.document, type, listener, options)\n }\n\n return this\n }, 'The interact.off() method is being deprecated')\n\n interact.debug = function () {\n return this.scope\n }\n\n /**\n * @alias module:interact.supportsTouch\n *\n * @return {boolean} Whether or not the browser supports touch input\n */\n interact.supportsTouch = function () {\n return browser.supportsTouch\n }\n\n /**\n * @alias module:interact.supportsPointerEvent\n *\n * @return {boolean} Whether or not the browser supports PointerEvents\n */\n interact.supportsPointerEvent = function () {\n return browser.supportsPointerEvent\n }\n\n /**\n * Cancels all interactions (end events are not fired)\n *\n * @alias module:interact.stop\n *\n * @return {object} interact\n */\n interact.stop = function () {\n for (const interaction of this.scope.interactions.list) {\n interaction.stop()\n }\n\n return this\n }\n\n /**\n * Returns or sets the distance the pointer must be moved before an action\n * sequence occurs. 
This also affects tolerance for tap events.\n *\n * @alias module:interact.pointerMoveTolerance\n *\n * @param {number} [newValue] The movement from the start position must be greater than this value\n * @return {interact | number}\n */\n interact.pointerMoveTolerance = function (newValue?: number) {\n if (is.number(newValue)) {\n this.scope.interactions.pointerMoveTolerance = newValue\n\n return this\n }\n\n return this.scope.interactions.pointerMoveTolerance\n }\n\n interact.addDocument = function (doc: Document, options?: object) {\n this.scope.addDocument(doc, options)\n }\n\n interact.removeDocument = function (doc: Document) {\n this.scope.removeDocument(doc)\n }\n\n return interact\n}\n","/* eslint-disable no-dupe-class-members */\nimport type { ActionMap, ActionName, Actions, Scope } from '@interactjs/core/scope'\nimport type {\n Context,\n Element,\n Target,\n Listeners,\n OrBoolean,\n EventTypes,\n ListenersArg,\n ActionMethod,\n} from '@interactjs/types/index'\nimport * as arr from '@interactjs/utils/arr'\nimport browser from '@interactjs/utils/browser'\nimport clone from '@interactjs/utils/clone'\nimport { getElementRect, matchesUpTo, nodeContains, trySelector } from '@interactjs/utils/domUtils'\nimport extend from '@interactjs/utils/extend'\nimport is from '@interactjs/utils/is'\nimport normalizeListeners from '@interactjs/utils/normalizeListeners'\nimport { getWindow } from '@interactjs/utils/window'\n\nimport { Eventable } from './Eventable'\nimport isNonNativeEvent from './isNonNativeEvent'\nimport type { ActionDefaults, Defaults, OptionsArg, PerActionDefaults } from './options'\nimport { Options } from './options'\n\ntype IgnoreValue = string | Element | boolean\ntype DeltaSource = 'page' | 'client'\n\n/** */\nexport class Interactable implements Partial {\n /** @internal */ get _defaults (): Defaults {\n return {\n base: {},\n perAction: {},\n actions: {} as ActionDefaults,\n }\n }\n\n readonly options!: Required\n readonly _actions: Actions\n readonly target: Target\n readonly events = new Eventable()\n readonly _context: Context\n readonly _win: Window\n readonly _doc: Document\n readonly _scopeEvents: Scope['events']\n\n /** @internal */ _rectChecker?: typeof Interactable.prototype.getRect\n\n /** */\n constructor (\n target: Target,\n options: any,\n defaultContext: Document | Element,\n scopeEvents: Scope['events'],\n ) {\n this._actions = options.actions\n this.target = target\n this._context = options.context || defaultContext\n this._win = getWindow(trySelector(target) ? 
this._context : target)\n this._doc = this._win.document\n this._scopeEvents = scopeEvents\n\n this.set(options)\n }\n\n setOnEvents (actionName: ActionName, phases: NonNullable<Options[ActionName]>) {\n if (is.func(phases.onstart)) {\n this.on(`${actionName}start`, phases.onstart)\n }\n if (is.func(phases.onmove)) {\n this.on(`${actionName}move`, phases.onmove)\n }\n if (is.func(phases.onend)) {\n this.on(`${actionName}end`, phases.onend)\n }\n if (is.func(phases.oninertiastart)) {\n this.on(`${actionName}inertiastart`, phases.oninertiastart)\n }\n\n return this\n }\n\n updatePerActionListeners (actionName: ActionName, prev: Listeners, cur: Listeners) {\n if (is.array(prev) || is.object(prev)) {\n this.off(actionName, prev)\n }\n\n if (is.array(cur) || is.object(cur)) {\n this.on(actionName, cur)\n }\n }\n\n setPerAction (actionName: ActionName, options: OrBoolean<Options[ActionName]>) {\n const defaults = this._defaults\n\n // for all the default per-action options\n for (const optionName_ in options) {\n const optionName = optionName_ as keyof PerActionDefaults\n const actionOptions = this.options[actionName]\n const optionValue: any = options[optionName]\n\n // remove old event listeners and add new ones\n if (optionName === 'listeners') {\n this.updatePerActionListeners(actionName, actionOptions.listeners, optionValue as Listeners)\n }\n\n // if the option value is an array\n if (is.array(optionValue)) {\n ;(actionOptions[optionName] as any) = arr.from(optionValue)\n }\n // if the option value is an object\n else if (is.plainObject(optionValue)) {\n // copy the object\n ;(actionOptions[optionName] as any) = extend(\n actionOptions[optionName] || ({} as any),\n clone(optionValue),\n )\n\n // set the enabled field to true if it exists in the defaults\n if (\n is.object(defaults.perAction[optionName]) &&\n 'enabled' in (defaults.perAction[optionName] as any)\n ) {\n ;(actionOptions[optionName] as any).enabled = optionValue.enabled !== false\n }\n }\n // if the option value is a boolean and the default is an object\n else if (is.bool(optionValue) && is.object(defaults.perAction[optionName])) {\n ;(actionOptions[optionName] as any).enabled = optionValue\n }\n // if it's anything else, do a plain assignment\n else {\n ;(actionOptions[optionName] as any) = optionValue\n }\n }\n }\n\n /**\n * The default function to get an Interactable's bounding rect. Can be\n * overridden using {@link Interactable.rectChecker}.\n *\n * @param {Element} [element] The element to measure.\n * @return {Rect} The object's bounding rectangle.\n */\n getRect (element: Element) {\n element = element || (is.element(this.target) ? this.target : null)\n\n if (is.string(this.target)) {\n element = element || this._context.querySelector(this.target)\n }\n\n return getElementRect(element)\n }\n\n /**\n * Returns or sets the function used to calculate the interactable's\n * element's rectangle\n *\n * @param {function} [checker] A function which returns this Interactable's\n * bounding rectangle. 
See {@link Interactable.getRect}\n * @return {function | object} The checker function or this Interactable\n */\n rectChecker(): (element: Element) => any | null\n rectChecker(checker: (element: Element) => any): this\n rectChecker (checker?: (element: Element) => any) {\n if (is.func(checker)) {\n this._rectChecker = checker\n\n this.getRect = (element) => {\n const rect = extend({}, this._rectChecker(element))\n\n if (!(('width' in rect) as unknown)) {\n rect.width = rect.right - rect.left\n rect.height = rect.bottom - rect.top\n }\n\n return rect\n }\n\n return this\n }\n\n if (checker === null) {\n delete this.getRect\n delete this._rectChecker\n\n return this\n }\n\n return this.getRect\n }\n\n _backCompatOption (optionName: keyof Options, newValue: any) {\n if (trySelector(newValue) || is.object(newValue)) {\n ;(this.options[optionName] as any) = newValue\n\n for (const action in this._actions.map) {\n ;(this.options[action as keyof ActionMap] as any)[optionName] = newValue\n }\n\n return this\n }\n\n return this.options[optionName]\n }\n\n /**\n * Gets or sets the origin of the Interactable's element. The x and y\n * of the origin will be subtracted from action event coordinates.\n *\n * @param {Element | object | string} [origin] An HTML or SVG Element whose\n * rect will be used, an object eg. { x: 0, y: 0 } or string 'parent', 'self'\n * or any CSS selector\n *\n * @return {object} The current origin or this Interactable\n */\n origin (newValue: any) {\n return this._backCompatOption('origin', newValue)\n }\n\n /**\n * Returns or sets the mouse coordinate types used to calculate the\n * movement of the pointer.\n *\n * @param {string} [newValue] Use 'client' if you will be scrolling while\n * interacting; Use 'page' if you want autoScroll to work\n * @return {string | object} The current deltaSource or this Interactable\n */\n deltaSource(): DeltaSource\n deltaSource(newValue: DeltaSource): this\n deltaSource (newValue?: DeltaSource) {\n if (newValue === 'page' || newValue === 'client') {\n this.options.deltaSource = newValue\n\n return this\n }\n\n return this.options.deltaSource\n }\n\n /**\n * Gets the selector context Node of the Interactable. 
The default is\n * `window.document`.\n *\n * @return {Node} The context Node of this Interactable\n */\n context () {\n return this._context\n }\n\n inContext (element: Document | Node) {\n return this._context === element.ownerDocument || nodeContains(this._context, element)\n }\n\n testIgnoreAllow (\n this: Interactable,\n options: { ignoreFrom?: IgnoreValue, allowFrom?: IgnoreValue },\n targetNode: Node,\n eventTarget: Node,\n ) {\n return (\n !this.testIgnore(options.ignoreFrom, targetNode, eventTarget) &&\n this.testAllow(options.allowFrom, targetNode, eventTarget)\n )\n }\n\n testAllow (this: Interactable, allowFrom: IgnoreValue, targetNode: Node, element: Node) {\n if (!allowFrom) {\n return true\n }\n\n if (!is.element(element)) {\n return false\n }\n\n if (is.string(allowFrom)) {\n return matchesUpTo(element, allowFrom, targetNode)\n } else if (is.element(allowFrom)) {\n return nodeContains(allowFrom, element)\n }\n\n return false\n }\n\n testIgnore (this: Interactable, ignoreFrom: IgnoreValue, targetNode: Node, element: Node) {\n if (!ignoreFrom || !is.element(element)) {\n return false\n }\n\n if (is.string(ignoreFrom)) {\n return matchesUpTo(element, ignoreFrom, targetNode)\n } else if (is.element(ignoreFrom)) {\n return nodeContains(ignoreFrom, element)\n }\n\n return false\n }\n\n /**\n * Calls listeners for the given InteractEvent type bound globally\n * and directly to this Interactable\n *\n * @param {InteractEvent} iEvent The InteractEvent object to be fired on this\n * Interactable\n * @return {Interactable} this Interactable\n */\n fire (iEvent: E) {\n this.events.fire(iEvent)\n\n return this\n }\n\n _onOff (method: 'on' | 'off', typeArg: EventTypes, listenerArg?: ListenersArg | null, options?: any) {\n if (is.object(typeArg) && !is.array(typeArg)) {\n options = listenerArg\n listenerArg = null\n }\n\n const addRemove = method === 'on' ? 
'add' : 'remove'\n const listeners = normalizeListeners(typeArg, listenerArg)\n\n for (let type in listeners) {\n if (type === 'wheel') {\n type = browser.wheelEvent\n }\n\n for (const listener of listeners[type]) {\n // if it is an action event type\n if (isNonNativeEvent(type, this._actions)) {\n this.events[method](type, listener)\n }\n // delegated event\n else if (is.string(this.target)) {\n this._scopeEvents[`${addRemove}Delegate` as 'addDelegate' | 'removeDelegate'](\n this.target,\n this._context,\n type,\n listener,\n options,\n )\n }\n // remove listener from this Interactable's element\n else {\n this._scopeEvents[addRemove](this.target, type, listener, options)\n }\n }\n }\n\n return this\n }\n\n /**\n * Binds a listener for an InteractEvent, pointerEvent or DOM event.\n *\n * @param {string | array | object} types The types of events to listen\n * for\n * @param {function | array | object} [listener] The event listener function(s)\n * @param {object | boolean} [options] options object or useCapture flag for\n * addEventListener\n * @return {Interactable} This Interactable\n */\n on (types: EventTypes, listener?: ListenersArg, options?: any) {\n return this._onOff('on', types, listener, options)\n }\n\n /**\n * Removes an InteractEvent, pointerEvent or DOM event listener.\n *\n * @param {string | array | object} types The types of events that were\n * listened for\n * @param {function | array | object} [listener] The event listener function(s)\n * @param {object | boolean} [options] options object or useCapture flag for\n * removeEventListener\n * @return {Interactable} This Interactable\n */\n off (types: string | string[] | EventTypes, listener?: ListenersArg, options?: any) {\n return this._onOff('off', types, listener, options)\n }\n\n /**\n * Reset the options of this Interactable\n *\n * @param {object} options The new settings to apply\n * @return {object} This Interactable\n */\n set (options: OptionsArg) {\n const defaults = this._defaults\n\n if (!is.object(options)) {\n options = {}\n }\n\n ;(this.options as Required) = clone(defaults.base) as Required\n\n for (const actionName_ in this._actions.methodDict) {\n const actionName = actionName_ as ActionName\n const methodName = this._actions.methodDict[actionName]\n\n this.options[actionName] = {}\n this.setPerAction(actionName, extend(extend({}, defaults.perAction), defaults.actions[actionName]))\n ;(this[methodName] as ActionMethod)(options[actionName])\n }\n\n for (const setting in options) {\n if (is.func((this as any)[setting])) {\n ;(this as any)[setting](options[setting as keyof typeof options])\n }\n }\n\n return this\n }\n\n /**\n * Remove this interactable from the list of interactables and remove it's\n * action capabilities and event listeners\n */\n unset () {\n if (is.string(this.target)) {\n // remove delegated events\n for (const type in this._scopeEvents.delegatedEvents) {\n const delegated = this._scopeEvents.delegatedEvents[type]\n\n for (let i = delegated.length - 1; i >= 0; i--) {\n const { selector, context, listeners } = delegated[i]\n\n if (selector === this.target && context === this._context) {\n delegated.splice(i, 1)\n }\n\n for (let l = listeners.length - 1; l >= 0; l--) {\n this._scopeEvents.removeDelegate(\n this.target,\n this._context,\n type,\n listeners[l][0],\n listeners[l][1],\n )\n }\n }\n }\n } else {\n this._scopeEvents.remove(this.target as Node, 'all')\n }\n }\n}\n","import type { Interactable } from '@interactjs/core/Interactable'\nimport type { OptionsArg, Options } from 
'@interactjs/core/options'\nimport type { Scope } from '@interactjs/core/scope'\nimport type { Target, Context } from '@interactjs/types/index'\nimport * as arr from '@interactjs/utils/arr'\nimport * as domUtils from '@interactjs/utils/domUtils'\nimport extend from '@interactjs/utils/extend'\nimport is from '@interactjs/utils/is'\n\ndeclare module '@interactjs/core/scope' {\n interface SignalArgs {\n 'interactable:new': {\n interactable: Interactable\n target: Target\n options: OptionsArg\n win: Window\n }\n }\n}\n\ninterface InteractableScopeProp {\n context: Context\n interactable: Interactable\n}\n\nexport class InteractableSet {\n // all set interactables\n list: Interactable[] = []\n\n selectorMap: {\n [selector: string]: InteractableScopeProp[]\n } = {}\n\n scope: Scope\n\n constructor (scope: Scope) {\n this.scope = scope\n scope.addListeners({\n 'interactable:unset': ({ interactable }) => {\n const { target, _context: context } = interactable\n const targetMappings: InteractableScopeProp[] = is.string(target)\n ? this.selectorMap[target]\n : (target as any)[this.scope.id]\n\n const targetIndex = arr.findIndex(targetMappings, (m) => m.context === context)\n if (targetMappings[targetIndex]) {\n // Destroying mappingInfo's context and interactable\n targetMappings[targetIndex].context = null\n targetMappings[targetIndex].interactable = null\n }\n targetMappings.splice(targetIndex, 1)\n },\n })\n }\n\n new (target: Target, options?: any): Interactable {\n options = extend(options || {}, {\n actions: this.scope.actions,\n })\n const interactable = new this.scope.Interactable(target, options, this.scope.document, this.scope.events)\n const mappingInfo = { context: interactable._context, interactable }\n\n this.scope.addDocument(interactable._doc)\n this.list.push(interactable)\n\n if (is.string(target)) {\n if (!this.selectorMap[target]) {\n this.selectorMap[target] = []\n }\n this.selectorMap[target].push(mappingInfo)\n } else {\n if (!(interactable.target as any)[this.scope.id]) {\n Object.defineProperty(target, this.scope.id, {\n value: [],\n configurable: true,\n })\n }\n\n ;(target as any)[this.scope.id].push(mappingInfo)\n }\n\n this.scope.fire('interactable:new', {\n target,\n options,\n interactable,\n win: this.scope._win,\n })\n\n return interactable\n }\n\n get (target: Target, options?: Options) {\n const context = (options && options.context) || this.scope.document\n const isSelector = is.string(target)\n const targetMappings: InteractableScopeProp[] = isSelector\n ? this.selectorMap[target as string]\n : (target as any)[this.scope.id]\n\n if (!targetMappings) {\n return null\n }\n\n const found = arr.find(\n targetMappings,\n (m) => m.context === context && (isSelector || m.interactable.inContext(target as any)),\n )\n\n return found && found.interactable\n }\n\n forEachMatch (node: Node, callback: (interactable: Interactable) => T) {\n for (const interactable of this.list) {\n let ret: void | T\n\n if (\n (is.string(interactable.target)\n ? 
// target is a selector and the element matches\n is.element(node) && domUtils.matchesSelector(node, interactable.target)\n : // target is the element\n node === interactable.target) &&\n // the element is in context\n interactable.inContext(node)\n ) {\n ret = callback(interactable)\n }\n\n if (ret !== undefined) {\n return ret\n }\n }\n }\n}\n","import type { Scope } from '@interactjs/core/scope'\nimport type { Element } from '@interactjs/types/index'\nimport * as arr from '@interactjs/utils/arr'\nimport * as domUtils from '@interactjs/utils/domUtils'\nimport extend from '@interactjs/utils/extend'\nimport is from '@interactjs/utils/is'\nimport pExtend from '@interactjs/utils/pointerExtend'\nimport * as pointerUtils from '@interactjs/utils/pointerUtils'\n\ndeclare module '@interactjs/core/scope' {\n interface Scope {\n events: ReturnType\n }\n}\n\ntype Listener = (event: Event | FakeEvent) => any\n\nfunction install (scope: Scope) {\n const targets: Array<{\n eventTarget: EventTarget\n events: { [type: string]: Listener[] }\n }> = []\n\n const delegatedEvents: {\n [type: string]: Array<{\n selector: string\n context: Node\n listeners: Array<[Listener, { capture: boolean, passive: boolean }]>\n }>\n } = {}\n const documents: Document[] = []\n\n const eventsMethods = {\n add,\n remove,\n\n addDelegate,\n removeDelegate,\n\n delegateListener,\n delegateUseCapture,\n delegatedEvents,\n documents,\n\n targets,\n\n supportsOptions: false,\n supportsPassive: false,\n }\n\n // check if browser supports passive events and options arg\n scope.document?.createElement('div').addEventListener('test', null, {\n get capture () {\n return (eventsMethods.supportsOptions = true)\n },\n get passive () {\n return (eventsMethods.supportsPassive = true)\n },\n })\n\n scope.events = eventsMethods\n\n function add (eventTarget: EventTarget, type: string, listener: Listener, optionalArg?: boolean | any) {\n const options = getOptions(optionalArg)\n let target = arr.find(targets, (t) => t.eventTarget === eventTarget)\n\n if (!target) {\n target = {\n eventTarget,\n events: {},\n }\n\n targets.push(target)\n }\n\n if (!target.events[type]) {\n target.events[type] = []\n }\n\n if (eventTarget.addEventListener && !arr.contains(target.events[type], listener)) {\n eventTarget.addEventListener(\n type,\n listener as any,\n eventsMethods.supportsOptions ? options : options.capture,\n )\n target.events[type].push(listener)\n }\n }\n\n function remove (\n eventTarget: EventTarget,\n type: string,\n listener?: 'all' | Listener,\n optionalArg?: boolean | any,\n ) {\n const options = getOptions(optionalArg)\n const targetIndex = arr.findIndex(targets, (t) => t.eventTarget === eventTarget)\n const target = targets[targetIndex]\n\n if (!target || !target.events) {\n return\n }\n\n if (type === 'all') {\n for (type in target.events) {\n if (target.events.hasOwnProperty(type)) {\n remove(eventTarget, type, 'all')\n }\n }\n return\n }\n\n let typeIsEmpty = false\n const typeListeners = target.events[type]\n\n if (typeListeners) {\n if (listener === 'all') {\n for (let i = typeListeners.length - 1; i >= 0; i--) {\n remove(eventTarget, type, typeListeners[i], options)\n }\n return\n } else {\n for (let i = 0; i < typeListeners.length; i++) {\n if (typeListeners[i] === listener) {\n eventTarget.removeEventListener(\n type,\n listener as any,\n eventsMethods.supportsOptions ? 
options : options.capture,\n )\n typeListeners.splice(i, 1)\n\n if (typeListeners.length === 0) {\n delete target.events[type]\n typeIsEmpty = true\n }\n\n break\n }\n }\n }\n }\n\n if (typeIsEmpty && !Object.keys(target.events).length) {\n targets.splice(targetIndex, 1)\n }\n }\n\n function addDelegate (selector: string, context: Node, type: string, listener: Listener, optionalArg?: any) {\n const options = getOptions(optionalArg)\n if (!delegatedEvents[type]) {\n delegatedEvents[type] = []\n\n // add delegate listener functions\n for (const doc of documents) {\n add(doc, type, delegateListener)\n add(doc, type, delegateUseCapture, true)\n }\n }\n\n const delegates = delegatedEvents[type]\n let delegate = arr.find(delegates, (d) => d.selector === selector && d.context === context)\n\n if (!delegate) {\n delegate = { selector, context, listeners: [] }\n delegates.push(delegate)\n }\n\n delegate.listeners.push([listener, options])\n }\n\n function removeDelegate (\n selector: string,\n context: Document | Element,\n type: string,\n listener?: Listener,\n optionalArg?: any,\n ) {\n const options = getOptions(optionalArg)\n const delegates = delegatedEvents[type]\n let matchFound = false\n let index: number\n\n if (!delegates) return\n\n // count from last index of delegated to 0\n for (index = delegates.length - 1; index >= 0; index--) {\n const cur = delegates[index]\n // look for matching selector and context Node\n if (cur.selector === selector && cur.context === context) {\n const { listeners } = cur\n\n // each item of the listeners array is an array: [function, capture, passive]\n for (let i = listeners.length - 1; i >= 0; i--) {\n const [fn, { capture, passive }] = listeners[i]\n\n // check if the listener functions and capture and passive flags match\n if (fn === listener && capture === options.capture && passive === options.passive) {\n // remove the listener from the array of listeners\n listeners.splice(i, 1)\n\n // if all listeners for this target have been removed\n // remove the target from the delegates array\n if (!listeners.length) {\n delegates.splice(index, 1)\n\n // remove delegate function from context\n remove(context, type, delegateListener)\n remove(context, type, delegateUseCapture, true)\n }\n\n // only remove one listener\n matchFound = true\n break\n }\n }\n\n if (matchFound) {\n break\n }\n }\n }\n }\n\n // bound to the interactable context when a DOM event\n // listener is added to a selector interactable\n function delegateListener (event: Event | FakeEvent, optionalArg?: any) {\n const options = getOptions(optionalArg)\n const fakeEvent = new FakeEvent(event as Event)\n const delegates = delegatedEvents[event.type]\n const [eventTarget] = pointerUtils.getEventTargets(event as Event)\n let element: Node = eventTarget\n\n // climb up document tree looking for selector matches\n while (is.element(element)) {\n for (let i = 0; i < delegates.length; i++) {\n const cur = delegates[i]\n const { selector, context } = cur\n\n if (\n domUtils.matchesSelector(element, selector) &&\n domUtils.nodeContains(context, eventTarget) &&\n domUtils.nodeContains(context, element)\n ) {\n const { listeners } = cur\n\n fakeEvent.currentTarget = element\n\n for (const [fn, { capture, passive }] of listeners) {\n if (capture === options.capture && passive === options.passive) {\n fn(fakeEvent)\n }\n }\n }\n }\n\n element = domUtils.parentNode(element)\n }\n }\n\n function delegateUseCapture (this: Element, event: Event | FakeEvent) {\n return delegateListener.call(this, event, true)\n 
}\n\n // for type inference\n return eventsMethods\n}\n\nclass FakeEvent implements Partial<Event> {\n currentTarget: Node\n originalEvent: Event\n type: string\n\n constructor (originalEvent: Event) {\n this.originalEvent = originalEvent\n // duplicate the event so that currentTarget can be changed\n pExtend(this, originalEvent)\n }\n\n preventOriginalDefault () {\n this.originalEvent.preventDefault()\n }\n\n stopPropagation () {\n this.originalEvent.stopPropagation()\n }\n\n stopImmediatePropagation () {\n this.originalEvent.stopImmediatePropagation()\n }\n}\n\nfunction getOptions (param: { [index: string]: any } | boolean): { capture: boolean, passive: boolean } {\n if (!is.object(param)) {\n return { capture: !!param, passive: false }\n }\n\n const options = extend({}, param) as any\n\n options.capture = !!param.capture\n options.passive = !!param.passive\n\n return options\n}\n\nexport default {\n id: 'events',\n install,\n}\n","import type Interaction from '@interactjs/core/Interaction'\nimport type { Scope } from '@interactjs/core/scope'\nimport type { PointerType } from '@interactjs/types/index'\nimport * as dom from '@interactjs/utils/domUtils'\n\nexport interface SearchDetails {\n pointer: PointerType\n pointerId: number\n pointerType: string\n eventType: string\n eventTarget: EventTarget\n curEventTarget: EventTarget\n scope: Scope\n}\n\nconst finder = {\n methodOrder: ['simulationResume', 'mouseOrPen', 'hasPointer', 'idle'] as const,\n\n search (details: SearchDetails) {\n for (const method of finder.methodOrder) {\n const interaction = finder[method](details)\n\n if (interaction) {\n return interaction\n }\n }\n\n return null\n },\n\n // try to resume simulation with a new pointer\n simulationResume ({ pointerType, eventType, eventTarget, scope }: SearchDetails) {\n if (!/down|start/i.test(eventType)) {\n return null\n }\n\n for (const interaction of scope.interactions.list) {\n let element = eventTarget as Node\n\n if (\n interaction.simulation &&\n interaction.simulation.allowResume &&\n interaction.pointerType === pointerType\n ) {\n while (element) {\n // if the element is the interaction element\n if (element === interaction.element) {\n return interaction\n }\n element = dom.parentNode(element)\n }\n }\n }\n\n return null\n },\n\n // if it's a mouse or pen interaction\n mouseOrPen ({ pointerId, pointerType, eventType, scope }: SearchDetails) {\n if (pointerType !== 'mouse' && pointerType !== 'pen') {\n return null\n }\n\n let firstNonActive\n\n for (const interaction of scope.interactions.list) {\n if (interaction.pointerType === pointerType) {\n // if it's a down event, skip interactions with running simulations\n if (interaction.simulation && !hasPointerId(interaction, pointerId)) {\n continue\n }\n\n // if the interaction is active, return it immediately\n if (interaction.interacting()) {\n return interaction\n }\n // otherwise save it and look for another active interaction\n else if (!firstNonActive) {\n firstNonActive = interaction\n }\n }\n }\n\n // if no active mouse interaction was found use the first inactive mouse\n // interaction\n if (firstNonActive) {\n return firstNonActive\n }\n\n // find any mouse or pen interaction.\n // ignore the interaction if the eventType is a *down, and a simulation\n // is active\n for (const interaction of scope.interactions.list) {\n if (interaction.pointerType === pointerType && !(/down/i.test(eventType) && interaction.simulation)) {\n return interaction\n }\n }\n\n return null\n },\n\n // get interaction that has this pointer\n 
hasPointer ({ pointerId, scope }: SearchDetails) {\n for (const interaction of scope.interactions.list) {\n if (hasPointerId(interaction, pointerId)) {\n return interaction\n }\n }\n\n return null\n },\n\n // get first idle interaction with a matching pointerType\n idle ({ pointerType, scope }: SearchDetails) {\n for (const interaction of scope.interactions.list) {\n // if there's already a pointer held down\n if (interaction.pointers.length === 1) {\n const target = interaction.interactable\n // don't add this pointer if there is a target interactable and it\n // isn't gesturable\n if (target && !(target.options.gesture && target.options.gesture.enabled)) {\n continue\n }\n }\n // maximum of 2 pointers per interaction\n else if (interaction.pointers.length >= 2) {\n continue\n }\n\n if (!interaction.interacting() && pointerType === interaction.pointerType) {\n return interaction\n }\n }\n\n return null\n },\n}\n\nfunction hasPointerId (interaction: Interaction, pointerId: number) {\n return interaction.pointers.some(({ id }) => id === pointerId)\n}\n\nexport default finder\n","import type { Scope, ActionName, SignalArgs, Plugin } from '@interactjs/core/scope'\nimport type { Listener } from '@interactjs/types/index'\nimport browser from '@interactjs/utils/browser'\nimport domObjects from '@interactjs/utils/domObjects'\nimport { nodeContains } from '@interactjs/utils/domUtils'\nimport * as pointerUtils from '@interactjs/utils/pointerUtils'\n\nimport InteractionBase from './Interaction'\nimport interactablePreventDefault from './interactablePreventDefault'\nimport type { SearchDetails } from './interactionFinder'\nimport finder from './interactionFinder'\n\ndeclare module '@interactjs/core/scope' {\n interface Scope {\n Interaction: typeof InteractionBase\n interactions: {\n new: (options: any) => InteractionBase\n list: Array>\n listeners: { [type: string]: Listener }\n docEvents: Array<{ type: string, listener: Listener }>\n pointerMoveTolerance: number\n }\n prevTouchTime: number\n }\n}\n\ndeclare module '@interactjs/core/scope' {\n interface SignalArgs {\n 'interactions:find': {\n interaction: InteractionBase\n searchDetails: SearchDetails\n }\n }\n}\n\nconst methodNames = [\n 'pointerDown',\n 'pointerMove',\n 'pointerUp',\n 'updatePointer',\n 'removePointer',\n 'windowBlur',\n]\n\nfunction install (scope: Scope) {\n const listeners = {} as any\n\n for (const method of methodNames) {\n listeners[method] = doOnInteractions(method, scope)\n }\n\n const pEventTypes = browser.pEventTypes\n let docEvents: typeof scope.interactions.docEvents\n\n if (domObjects.PointerEvent) {\n docEvents = [\n { type: pEventTypes.down, listener: releasePointersOnRemovedEls },\n { type: pEventTypes.down, listener: listeners.pointerDown },\n { type: pEventTypes.move, listener: listeners.pointerMove },\n { type: pEventTypes.up, listener: listeners.pointerUp },\n { type: pEventTypes.cancel, listener: listeners.pointerUp },\n ]\n } else {\n docEvents = [\n { type: 'mousedown', listener: listeners.pointerDown },\n { type: 'mousemove', listener: listeners.pointerMove },\n { type: 'mouseup', listener: listeners.pointerUp },\n\n { type: 'touchstart', listener: releasePointersOnRemovedEls },\n { type: 'touchstart', listener: listeners.pointerDown },\n { type: 'touchmove', listener: listeners.pointerMove },\n { type: 'touchend', listener: listeners.pointerUp },\n { type: 'touchcancel', listener: listeners.pointerUp },\n ]\n }\n\n docEvents.push({\n type: 'blur',\n listener (event) {\n for (const interaction of 
scope.interactions.list) {\n interaction.documentBlur(event)\n }\n },\n })\n\n // for ignoring browser's simulated mouse events\n scope.prevTouchTime = 0\n\n scope.Interaction = class extends InteractionBase {\n get pointerMoveTolerance () {\n return scope.interactions.pointerMoveTolerance\n }\n\n set pointerMoveTolerance (value) {\n scope.interactions.pointerMoveTolerance = value\n }\n\n _now () {\n return scope.now()\n }\n }\n\n scope.interactions = {\n // all active and idle interactions\n list: [],\n new (options: { pointerType?: string, scopeFire?: Scope['fire'] }) {\n options.scopeFire = (name, arg) => scope.fire(name, arg)\n\n const interaction = new scope.Interaction(options as Required)\n\n scope.interactions.list.push(interaction)\n return interaction\n },\n listeners,\n docEvents,\n pointerMoveTolerance: 1,\n }\n\n function releasePointersOnRemovedEls () {\n // for all inactive touch interactions with pointers down\n for (const interaction of scope.interactions.list) {\n if (!interaction.pointerIsDown || interaction.pointerType !== 'touch' || interaction._interacting) {\n continue\n }\n\n // if a pointer is down on an element that is no longer in the DOM tree\n for (const pointer of interaction.pointers) {\n if (!scope.documents.some(({ doc }) => nodeContains(doc, pointer.downTarget))) {\n // remove the pointer from the interaction\n interaction.removePointer(pointer.pointer, pointer.event)\n }\n }\n }\n }\n\n scope.usePlugin(interactablePreventDefault)\n}\n\nfunction doOnInteractions (method: string, scope: Scope) {\n return function (event: Event) {\n const interactions = scope.interactions.list\n\n const pointerType = pointerUtils.getPointerType(event)\n const [eventTarget, curEventTarget] = pointerUtils.getEventTargets(event)\n const matches: any[] = [] // [ [pointer, interaction], ...]\n\n if (/^touch/.test(event.type)) {\n scope.prevTouchTime = scope.now()\n\n // @ts-expect-error\n for (const changedTouch of event.changedTouches) {\n const pointer = changedTouch\n const pointerId = pointerUtils.getPointerId(pointer)\n const searchDetails: SearchDetails = {\n pointer,\n pointerId,\n pointerType,\n eventType: event.type,\n eventTarget,\n curEventTarget,\n scope,\n }\n const interaction = getInteraction(searchDetails)\n\n matches.push([\n searchDetails.pointer,\n searchDetails.eventTarget,\n searchDetails.curEventTarget,\n interaction,\n ])\n }\n } else {\n let invalidPointer = false\n\n if (!browser.supportsPointerEvent && /mouse/.test(event.type)) {\n // ignore mouse events while touch interactions are active\n for (let i = 0; i < interactions.length && !invalidPointer; i++) {\n invalidPointer = interactions[i].pointerType !== 'mouse' && interactions[i].pointerIsDown\n }\n\n // try to ignore mouse events that are simulated by the browser\n // after a touch event\n invalidPointer =\n invalidPointer ||\n scope.now() - scope.prevTouchTime < 500 ||\n // on iOS and Firefox Mobile, MouseEvent.timeStamp is zero if simulated\n event.timeStamp === 0\n }\n\n if (!invalidPointer) {\n const searchDetails = {\n pointer: event as PointerEvent,\n pointerId: pointerUtils.getPointerId(event as PointerEvent),\n pointerType,\n eventType: event.type,\n curEventTarget,\n eventTarget,\n scope,\n }\n\n const interaction = getInteraction(searchDetails)\n\n matches.push([\n searchDetails.pointer,\n searchDetails.eventTarget,\n searchDetails.curEventTarget,\n interaction,\n ])\n }\n }\n\n // eslint-disable-next-line no-shadow\n for (const [pointer, eventTarget, curEventTarget, interaction] of 
matches) {\n interaction[method](pointer, event, eventTarget, curEventTarget)\n }\n }\n}\n\nfunction getInteraction (searchDetails: SearchDetails) {\n const { pointerType, scope } = searchDetails\n\n const foundInteraction = finder.search(searchDetails)\n const signalArg = { interaction: foundInteraction, searchDetails }\n\n scope.fire('interactions:find', signalArg)\n\n return signalArg.interaction || scope.interactions.new({ pointerType })\n}\n\nfunction onDocSignal (\n { doc, scope, options }: SignalArgs[T],\n eventMethodName: 'add' | 'remove',\n) {\n const {\n interactions: { docEvents },\n events,\n } = scope\n const eventMethod = events[eventMethodName]\n\n if (scope.browser.isIOS && !options.events) {\n options.events = { passive: false }\n }\n\n // delegate event listener\n for (const eventType in events.delegatedEvents) {\n eventMethod(doc, eventType, events.delegateListener)\n eventMethod(doc, eventType, events.delegateUseCapture, true)\n }\n\n const eventOptions = options && options.events\n\n for (const { type, listener } of docEvents) {\n eventMethod(doc, type, listener, eventOptions)\n }\n}\n\nconst interactions: Plugin = {\n id: 'core/interactions',\n install,\n listeners: {\n 'scope:add-document': (arg) => onDocSignal(arg, 'add'),\n 'scope:remove-document': (arg) => onDocSignal(arg, 'remove'),\n 'interactable:unset': ({ interactable }, scope) => {\n // Stop and destroy related interactions when an Interactable is unset\n for (let i = scope.interactions.list.length - 1; i >= 0; i--) {\n const interaction = scope.interactions.list[i]\n\n if (interaction.interactable !== interactable) {\n continue\n }\n\n interaction.stop()\n scope.fire('interactions:destroy', { interaction })\n interaction.destroy()\n\n if (scope.interactions.list.length > 2) {\n scope.interactions.list.splice(i, 1)\n }\n }\n },\n },\n onDocSignal,\n doOnInteractions,\n methodNames,\n}\n\nexport default interactions\n","import type Interaction from '@interactjs/core/Interaction'\nimport browser from '@interactjs/utils/browser'\nimport clone from '@interactjs/utils/clone'\nimport domObjects from '@interactjs/utils/domObjects'\nimport extend from '@interactjs/utils/extend'\nimport is from '@interactjs/utils/is'\nimport raf from '@interactjs/utils/raf'\nimport * as win from '@interactjs/utils/window'\n\nimport { Eventable } from './Eventable'\nimport type { PhaseMap } from './InteractEvent'\nimport { InteractEvent } from './InteractEvent'\nimport { createInteractStatic } from './InteractStatic'\nimport type { Interactable } from './Interactable'\nimport { Interactable as InteractableBase } from './Interactable'\nimport { InteractableSet } from './InteractableSet'\nimport events from './events'\nimport interactions from './interactions'\nimport type { OptionsArg } from './options'\nimport { defaults } from './options'\n\nexport interface SignalArgs {\n 'scope:add-document': DocSignalArg\n 'scope:remove-document': DocSignalArg\n 'interactable:unset': { interactable: InteractableBase }\n 'interactable:set': { interactable: InteractableBase, options: OptionsArg }\n 'interactions:destroy': { interaction: Interaction }\n}\n\nexport type ListenerName = keyof SignalArgs\n\nexport type ListenerMap = {\n [P in ListenerName]?: (arg: SignalArgs[P], scope: Scope, signalName: P) => void | boolean\n}\n\ninterface DocSignalArg {\n doc: Document\n window: Window\n scope: Scope\n options: Record\n}\n\n// eslint-disable-next-line @typescript-eslint/no-empty-interface\nexport interface ActionMap {}\nexport type ActionName = keyof 
ActionMap\n\nexport interface Actions {\n map: ActionMap\n phases: PhaseMap\n methodDict: { [P in ActionName]?: keyof Interactable }\n phaselessTypes: { [type: string]: true }\n}\n\nexport interface Plugin {\n [key: string]: any\n id?: string\n listeners?: ListenerMap\n before?: string[]\n install?(scope: Scope, options?: any): void\n}\n\nexport class Scope {\n id = `__interact_scope_${Math.floor(Math.random() * 100)}`\n isInitialized = false\n listenerMaps: Array<{\n map: ListenerMap\n id: string\n }> = []\n\n browser = browser\n defaults = clone(defaults) as typeof defaults\n Eventable = Eventable\n actions: Actions = {\n map: {},\n phases: {\n start: true,\n move: true,\n end: true,\n },\n methodDict: {},\n phaselessTypes: {},\n }\n\n interactStatic = createInteractStatic(this)\n InteractEvent = InteractEvent\n Interactable: typeof InteractableBase\n interactables = new InteractableSet(this)\n\n // main window\n _win!: Window\n\n // main document\n document!: Document\n\n // main window\n window!: Window\n\n // all documents being listened to\n documents: Array<{ doc: Document, options: any }> = []\n\n _plugins: {\n list: Plugin[]\n map: { [id: string]: Plugin }\n } = {\n list: [],\n map: {},\n }\n\n constructor () {\n const scope = this\n\n this.Interactable = class extends InteractableBase {\n get _defaults () {\n return scope.defaults\n }\n\n set (this: T, options: OptionsArg) {\n super.set(options)\n\n scope.fire('interactable:set', {\n options,\n interactable: this,\n })\n\n return this\n }\n\n unset (this: InteractableBase) {\n super.unset()\n scope.interactables.list.splice(scope.interactables.list.indexOf(this), 1)\n\n scope.fire('interactable:unset', { interactable: this })\n }\n }\n }\n\n addListeners (map: ListenerMap, id?: string) {\n this.listenerMaps.push({ id, map })\n }\n\n fire (name: T, arg: SignalArgs[T]): void | false {\n for (const {\n map: { [name]: listener },\n } of this.listenerMaps) {\n if (!!listener && listener(arg as any, this, name as never) === false) {\n return false\n }\n }\n }\n\n onWindowUnload = (event: BeforeUnloadEvent) => this.removeDocument(event.target as Document)\n\n init (window: Window | typeof globalThis) {\n return this.isInitialized ? this : initScope(this, window)\n }\n\n pluginIsInstalled (plugin: Plugin) {\n return this._plugins.map[plugin.id] || this._plugins.list.indexOf(plugin) !== -1\n }\n\n usePlugin (plugin: Plugin, options?: { [key: string]: any }) {\n if (!this.isInitialized) {\n return this\n }\n\n if (this.pluginIsInstalled(plugin)) {\n return this\n }\n\n if (plugin.id) {\n this._plugins.map[plugin.id] = plugin\n }\n this._plugins.list.push(plugin)\n\n if (plugin.install) {\n plugin.install(this, options)\n }\n\n if (plugin.listeners && plugin.before) {\n let index = 0\n const len = this.listenerMaps.length\n const before = plugin.before.reduce((acc, id) => {\n acc[id] = true\n acc[pluginIdRoot(id)] = true\n return acc\n }, {})\n\n for (; index < len; index++) {\n const otherId = this.listenerMaps[index].id\n\n if (before[otherId] || before[pluginIdRoot(otherId)]) {\n break\n }\n }\n\n this.listenerMaps.splice(index, 0, { id: plugin.id, map: plugin.listeners })\n } else if (plugin.listeners) {\n this.listenerMaps.push({ id: plugin.id, map: plugin.listeners })\n }\n\n return this\n }\n\n addDocument (doc: Document, options?: any): void | false {\n // do nothing if document is already known\n if (this.getDocIndex(doc) !== -1) {\n return false\n }\n\n const window = win.getWindow(doc)\n\n options = options ? 
extend({}, options) : {}\n\n this.documents.push({ doc, options })\n this.events.documents.push(doc)\n\n // don't add an unload event for the main document\n // so that the page may be cached in browser history\n if (doc !== this.document) {\n this.events.add(window, 'unload', this.onWindowUnload)\n }\n\n this.fire('scope:add-document', { doc, window, scope: this, options })\n }\n\n removeDocument (doc: Document) {\n const index = this.getDocIndex(doc)\n\n const window = win.getWindow(doc)\n const options = this.documents[index].options\n\n this.events.remove(window, 'unload', this.onWindowUnload)\n\n this.documents.splice(index, 1)\n this.events.documents.splice(index, 1)\n\n this.fire('scope:remove-document', { doc, window, scope: this, options })\n }\n\n getDocIndex (doc: Document) {\n for (let i = 0; i < this.documents.length; i++) {\n if (this.documents[i].doc === doc) {\n return i\n }\n }\n\n return -1\n }\n\n getDocOptions (doc: Document) {\n const docIndex = this.getDocIndex(doc)\n\n return docIndex === -1 ? null : this.documents[docIndex].options\n }\n\n now () {\n return (((this.window as any).Date as typeof Date) || Date).now()\n }\n}\n\nexport function initScope (scope: Scope, window: Window | typeof globalThis) {\n scope.isInitialized = true\n\n if (is.window(window)) {\n win.init(window)\n }\n\n domObjects.init(window)\n browser.init(window)\n raf.init(window)\n\n // @ts-expect-error\n scope.window = window\n scope.document = window.document\n\n scope.usePlugin(interactions)\n scope.usePlugin(events)\n\n return scope\n}\n\nfunction pluginIdRoot (id: string) {\n return id && id.replace(/\\/.*$/, '')\n}\n","import { Scope } from '@interactjs/core/scope'\n\nconst scope = new Scope()\n\nconst interact = scope.interactStatic\n\nexport default interact\n\nconst _global = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? 
window : this\nscope.init(_global)\n","export default () => {}\n","export default () => {}\n","import type { SnapFunction, SnapTarget } from '@interactjs/modifiers/snap/pointer'\nimport type { Rect, Point } from '@interactjs/types/index'\n\nexport type GridOptions = (Partial | Point) & {\n range?: number\n limits?: Rect\n offset?: Point\n}\n\nexport default (grid: GridOptions) => {\n const coordFields = ([\n ['x', 'y'],\n ['left', 'top'],\n ['right', 'bottom'],\n ['width', 'height'],\n ] as const).filter(([xField, yField]) => xField in grid || yField in grid)\n\n const gridFunc: SnapFunction & {\n grid: typeof grid\n coordFields: typeof coordFields\n } = (x, y) => {\n const {\n range,\n limits = {\n left: -Infinity,\n right: Infinity,\n top: -Infinity,\n bottom: Infinity,\n },\n offset = { x: 0, y: 0 },\n } = grid\n\n const result: SnapTarget & {\n grid: typeof grid\n } = { range, grid, x: null as number, y: null as number }\n\n for (const [xField, yField] of coordFields) {\n const gridx = Math.round((x - offset.x) / (grid as any)[xField])\n const gridy = Math.round((y - offset.y) / (grid as any)[yField])\n\n result[xField] = Math.max(limits.left, Math.min(limits.right, gridx * (grid as any)[xField] + offset.x))\n result[yField] = Math.max(limits.top, Math.min(limits.bottom, gridy * (grid as any)[yField] + offset.y))\n }\n\n return result\n }\n\n gridFunc.grid = grid\n gridFunc.coordFields = coordFields\n\n return gridFunc\n}\n","import type { Plugin } from '@interactjs/core/scope'\nimport extend from '@interactjs/utils/extend'\n\nimport * as allSnappers from './all'\n\ndeclare module '@interactjs/core/InteractStatic' {\n export interface InteractStatic {\n snappers: typeof allSnappers\n createSnapGrid: typeof allSnappers.grid\n }\n}\n\nconst snappersPlugin: Plugin = {\n id: 'snappers',\n install (scope) {\n const { interactStatic: interact } = scope\n\n interact.snappers = extend(interact.snappers || {}, allSnappers)\n interact.createSnapGrid = interact.snappers.grid\n },\n}\n\nexport default snappersPlugin\n","/**\n * @module modifiers/aspectRatio\n *\n * @description\n * This module forces elements to be resized with a specified dx/dy ratio.\n *\n * ```js\n * interact(target).resizable({\n * modifiers: [\n * interact.modifiers.snapSize({\n * targets: [ interact.snappers.grid({ x: 20, y: 20 }) ],\n * }),\n * interact.aspectRatio({ ratio: 'preserve' }),\n * ],\n * });\n * ```\n */\n\nimport type { Point, Rect, EdgeOptions } from '@interactjs/types/index'\nimport extend from '@interactjs/utils/extend'\nimport { addEdges } from '@interactjs/utils/rect'\n\nimport Modification from './Modification'\nimport type { Modifier, ModifierModule, ModifierState } from './base'\nimport { makeModifier } from './base'\n\nexport interface AspectRatioOptions {\n ratio?: number | 'preserve'\n equalDelta?: boolean\n modifiers?: Modifier[]\n enabled?: boolean\n}\n\nexport type AspectRatioState = ModifierState<\nAspectRatioOptions,\n{\n startCoords: Point\n startRect: Rect\n linkedEdges: EdgeOptions\n ratio: number\n equalDelta: boolean\n xIsPrimaryAxis: boolean\n edgeSign: 1 | -1\n subModification: Modification\n}\n>\n\nconst aspectRatio: ModifierModule = {\n start (arg) {\n const { state, rect, edges: originalEdges, pageCoords: coords } = arg\n let { ratio } = state.options\n const { equalDelta, modifiers } = state.options\n\n if (ratio === 'preserve') {\n ratio = rect.width / rect.height\n }\n\n state.startCoords = extend({}, coords)\n state.startRect = extend({}, rect)\n state.ratio = ratio\n 
state.equalDelta = equalDelta\n\n const linkedEdges = (state.linkedEdges = {\n top: originalEdges.top || (originalEdges.left && !originalEdges.bottom),\n left: originalEdges.left || (originalEdges.top && !originalEdges.right),\n bottom: originalEdges.bottom || (originalEdges.right && !originalEdges.top),\n right: originalEdges.right || (originalEdges.bottom && !originalEdges.left),\n })\n\n state.xIsPrimaryAxis = !!(originalEdges.left || originalEdges.right)\n\n if (state.equalDelta) {\n state.edgeSign = ((linkedEdges.left ? 1 : -1) * (linkedEdges.top ? 1 : -1)) as 1 | -1\n } else {\n const negativeSecondaryEdge = state.xIsPrimaryAxis ? linkedEdges.top : linkedEdges.left\n state.edgeSign = negativeSecondaryEdge ? -1 : 1\n }\n\n extend(arg.edges, linkedEdges)\n\n if (!modifiers || !modifiers.length) return\n\n const subModification = new Modification(arg.interaction)\n\n subModification.copyFrom(arg.interaction.modification)\n subModification.prepareStates(modifiers)\n\n state.subModification = subModification\n subModification.startAll({ ...arg })\n },\n\n set (arg) {\n const { state, rect, coords } = arg\n const initialCoords = extend({}, coords)\n const aspectMethod = state.equalDelta ? setEqualDelta : setRatio\n\n aspectMethod(state, state.xIsPrimaryAxis, coords, rect)\n\n if (!state.subModification) {\n return null\n }\n\n const correctedRect = extend({}, rect)\n\n addEdges(state.linkedEdges, correctedRect, {\n x: coords.x - initialCoords.x,\n y: coords.y - initialCoords.y,\n })\n\n const result = state.subModification.setAll({\n ...arg,\n rect: correctedRect,\n edges: state.linkedEdges,\n pageCoords: coords,\n prevCoords: coords,\n prevRect: correctedRect,\n })\n\n const { delta } = result\n\n if (result.changed) {\n const xIsCriticalAxis = Math.abs(delta.x) > Math.abs(delta.y)\n\n // do aspect modification again with critical edge axis as primary\n aspectMethod(state, xIsCriticalAxis, result.coords, result.rect)\n extend(coords, result.coords)\n }\n\n return result.eventProps\n },\n\n defaults: {\n ratio: 'preserve',\n equalDelta: false,\n modifiers: [],\n enabled: false,\n },\n}\n\nfunction setEqualDelta ({ startCoords, edgeSign }: AspectRatioState, xIsPrimaryAxis: boolean, coords: Point) {\n if (xIsPrimaryAxis) {\n coords.y = startCoords.y + (coords.x - startCoords.x) * edgeSign\n } else {\n coords.x = startCoords.x + (coords.y - startCoords.y) * edgeSign\n }\n}\n\nfunction setRatio (\n { startRect, startCoords, ratio, edgeSign }: AspectRatioState,\n xIsPrimaryAxis: boolean,\n coords: Point,\n rect: Rect,\n) {\n if (xIsPrimaryAxis) {\n const newHeight = rect.width / ratio\n\n coords.y = startCoords.y + (newHeight - startRect.height) * edgeSign\n } else {\n const newWidth = rect.height * ratio\n\n coords.x = startCoords.x + (newWidth - startRect.width) * edgeSign\n }\n}\n\nexport default makeModifier(aspectRatio, 'aspectRatio')\nexport { aspectRatio }\n","import type { ModifierFunction } from '@interactjs/modifiers/base'\n\nconst noop = ((() => {}) as unknown) as ModifierFunction\n\nnoop._defaults = {}\n\nexport default noop\n","import type Interaction from '@interactjs/core/Interaction'\nimport type { RectResolvable, Rect, Point } from '@interactjs/types/index'\nimport extend from '@interactjs/utils/extend'\nimport is from '@interactjs/utils/is'\nimport * as rectUtils from '@interactjs/utils/rect'\n\nimport type { ModifierArg, ModifierModule, ModifierState } from '../base'\nimport { makeModifier } from '../base'\n\nexport interface RestrictOptions {\n // where to drag over\n 
restriction: RectResolvable<[number, number, Interaction]>\n // what part of self is allowed to drag over\n elementRect: Rect\n offset: Rect\n // restrict just before the end drag\n endOnly: boolean\n enabled?: boolean\n}\n\nexport type RestrictState = ModifierState<\nRestrictOptions,\n{\n offset: Rect\n}\n>\n\nfunction start ({ rect, startOffset, state, interaction, pageCoords }: ModifierArg) {\n const { options } = state\n const { elementRect } = options\n const offset: Rect = extend(\n {\n left: 0,\n top: 0,\n right: 0,\n bottom: 0,\n },\n options.offset || {},\n )\n\n if (rect && elementRect) {\n const restriction = getRestrictionRect(options.restriction, interaction, pageCoords)\n\n if (restriction) {\n const widthDiff = restriction.right - restriction.left - rect.width\n const heightDiff = restriction.bottom - restriction.top - rect.height\n\n if (widthDiff < 0) {\n offset.left += widthDiff\n offset.right += widthDiff\n }\n if (heightDiff < 0) {\n offset.top += heightDiff\n offset.bottom += heightDiff\n }\n }\n\n offset.left += startOffset.left - rect.width * elementRect.left\n offset.top += startOffset.top - rect.height * elementRect.top\n\n offset.right += startOffset.right - rect.width * (1 - elementRect.right)\n offset.bottom += startOffset.bottom - rect.height * (1 - elementRect.bottom)\n }\n\n state.offset = offset\n}\n\nfunction set ({ coords, interaction, state }: ModifierArg) {\n const { options, offset } = state\n\n const restriction = getRestrictionRect(options.restriction, interaction, coords)\n\n if (!restriction) return\n\n const rect = rectUtils.xywhToTlbr(restriction)\n\n coords.x = Math.max(Math.min(rect.right - offset.right, coords.x), rect.left + offset.left)\n coords.y = Math.max(Math.min(rect.bottom - offset.bottom, coords.y), rect.top + offset.top)\n}\n\nexport function getRestrictionRect (\n value: RectResolvable<[number, number, Interaction]>,\n interaction: Interaction,\n coords?: Point,\n) {\n if (is.func(value)) {\n return rectUtils.resolveRectLike(value, interaction.interactable, interaction.element, [\n coords.x,\n coords.y,\n interaction,\n ])\n } else {\n return rectUtils.resolveRectLike(value, interaction.interactable, interaction.element)\n }\n}\n\nconst defaults: RestrictOptions = {\n restriction: null,\n elementRect: null,\n offset: null,\n endOnly: false,\n enabled: false,\n}\n\nconst restrict: ModifierModule = {\n start,\n set,\n defaults,\n}\n\nexport default makeModifier(restrict, 'restrict')\nexport { restrict }\n","// This module adds the options.resize.restrictEdges setting which sets min and\n// max for the top, left, bottom and right edges of the target being resized.\n//\n// interact(target).resize({\n// edges: { top: true, left: true },\n// restrictEdges: {\n// inner: { top: 200, left: 200, right: 400, bottom: 400 },\n// outer: { top: 0, left: 0, right: 600, bottom: 600 },\n// },\n// })\n\nimport type { Point, Rect } from '@interactjs/types/index'\nimport extend from '@interactjs/utils/extend'\nimport * as rectUtils from '@interactjs/utils/rect'\n\nimport type { ModifierArg, ModifierState } from '../base'\nimport { makeModifier } from '../base'\n\nimport type { RestrictOptions } from './pointer'\nimport { getRestrictionRect } from './pointer'\n\nexport interface RestrictEdgesOptions {\n inner: RestrictOptions['restriction']\n outer: RestrictOptions['restriction']\n offset?: RestrictOptions['offset']\n endOnly: boolean\n enabled?: boolean\n}\n\nexport type RestrictEdgesState = ModifierState<\nRestrictEdgesOptions,\n{\n inner: Rect\n outer: 
Rect\n offset: RestrictEdgesOptions['offset']\n}\n>\n\nconst noInner = { top: +Infinity, left: +Infinity, bottom: -Infinity, right: -Infinity }\nconst noOuter = { top: -Infinity, left: -Infinity, bottom: +Infinity, right: +Infinity }\n\nfunction start ({ interaction, startOffset, state }: ModifierArg) {\n const { options } = state\n let offset: Point\n\n if (options) {\n const offsetRect = getRestrictionRect(options.offset, interaction, interaction.coords.start.page)\n\n offset = rectUtils.rectToXY(offsetRect)\n }\n\n offset = offset || { x: 0, y: 0 }\n\n state.offset = {\n top: offset.y + startOffset.top,\n left: offset.x + startOffset.left,\n bottom: offset.y - startOffset.bottom,\n right: offset.x - startOffset.right,\n }\n}\n\nfunction set ({ coords, edges, interaction, state }: ModifierArg) {\n const { offset, options } = state\n\n if (!edges) {\n return\n }\n\n const page = extend({}, coords)\n const inner = getRestrictionRect(options.inner, interaction, page) || ({} as Rect)\n const outer = getRestrictionRect(options.outer, interaction, page) || ({} as Rect)\n\n fixRect(inner, noInner)\n fixRect(outer, noOuter)\n\n if (edges.top) {\n coords.y = Math.min(Math.max(outer.top + offset.top, page.y), inner.top + offset.top)\n } else if (edges.bottom) {\n coords.y = Math.max(Math.min(outer.bottom + offset.bottom, page.y), inner.bottom + offset.bottom)\n }\n if (edges.left) {\n coords.x = Math.min(Math.max(outer.left + offset.left, page.x), inner.left + offset.left)\n } else if (edges.right) {\n coords.x = Math.max(Math.min(outer.right + offset.right, page.x), inner.right + offset.right)\n }\n}\n\nfunction fixRect (rect: Rect, defaults: Rect) {\n for (const edge of ['top', 'left', 'bottom', 'right']) {\n if (!(edge in rect)) {\n rect[edge] = defaults[edge]\n }\n }\n\n return rect\n}\n\nconst defaults: RestrictEdgesOptions = {\n inner: null,\n outer: null,\n offset: null,\n endOnly: false,\n enabled: false,\n}\n\nconst restrictEdges = {\n noInner,\n noOuter,\n start,\n set,\n defaults,\n}\n\nexport default makeModifier(restrictEdges, 'restrictEdges')\nexport { restrictEdges }\n","import extend from '@interactjs/utils/extend'\n\nimport { makeModifier } from '../base'\n\nimport { restrict } from './pointer'\n\nconst defaults = extend(\n {\n get elementRect () {\n return { top: 0, left: 0, bottom: 1, right: 1 }\n },\n set elementRect (_) {},\n },\n restrict.defaults,\n)\n\nconst restrictRect = {\n start: restrict.start,\n set: restrict.set,\n defaults,\n}\n\nexport default makeModifier(restrictRect, 'restrictRect')\nexport { restrictRect }\n","import type { Point, Rect, Size } from '@interactjs/types/index'\nimport extend from '@interactjs/utils/extend'\nimport * as rectUtils from '@interactjs/utils/rect'\n\nimport type { ModifierArg, ModifierState } from '../base'\nimport { makeModifier } from '../base'\n\nimport type { RestrictEdgesState } from './edges'\nimport { restrictEdges } from './edges'\nimport type { RestrictOptions } from './pointer'\nimport { getRestrictionRect } from './pointer'\n\nconst noMin = { width: -Infinity, height: -Infinity }\nconst noMax = { width: +Infinity, height: +Infinity }\n\nexport interface RestrictSizeOptions {\n min?: Size | Point | RestrictOptions['restriction']\n max?: Size | Point | RestrictOptions['restriction']\n endOnly: boolean\n enabled?: boolean\n}\n\nfunction start (arg: ModifierArg) {\n return restrictEdges.start(arg)\n}\n\nexport type RestrictSizeState = RestrictEdgesState &\nModifierState<\nRestrictSizeOptions & { inner: Rect, outer: Rect },\n{\n 
min: Rect\n max: Rect\n}\n>\n\nfunction set (arg: ModifierArg) {\n const { interaction, state, rect, edges } = arg\n const { options } = state\n\n if (!edges) {\n return\n }\n\n const minSize =\n rectUtils.tlbrToXywh(getRestrictionRect(options.min as any, interaction, arg.coords)) || noMin\n const maxSize =\n rectUtils.tlbrToXywh(getRestrictionRect(options.max as any, interaction, arg.coords)) || noMax\n\n state.options = {\n endOnly: options.endOnly,\n inner: extend({}, restrictEdges.noInner),\n outer: extend({}, restrictEdges.noOuter),\n }\n\n if (edges.top) {\n state.options.inner.top = rect.bottom - minSize.height\n state.options.outer.top = rect.bottom - maxSize.height\n } else if (edges.bottom) {\n state.options.inner.bottom = rect.top + minSize.height\n state.options.outer.bottom = rect.top + maxSize.height\n }\n if (edges.left) {\n state.options.inner.left = rect.right - minSize.width\n state.options.outer.left = rect.right - maxSize.width\n } else if (edges.right) {\n state.options.inner.right = rect.left + minSize.width\n state.options.outer.right = rect.left + maxSize.width\n }\n\n restrictEdges.set(arg)\n\n state.options = options\n}\n\nconst defaults: RestrictSizeOptions = {\n min: null,\n max: null,\n endOnly: false,\n enabled: false,\n}\n\nconst restrictSize = {\n start,\n set,\n defaults,\n}\n\nexport default makeModifier(restrictSize, 'restrictSize')\nexport { restrictSize }\n","import type { Interaction, InteractionProxy } from '@interactjs/core/Interaction'\nimport type { ActionName } from '@interactjs/core/scope'\nimport type { Point, RectResolvable, Element } from '@interactjs/types/index'\nimport extend from '@interactjs/utils/extend'\nimport getOriginXY from '@interactjs/utils/getOriginXY'\nimport hypot from '@interactjs/utils/hypot'\nimport is from '@interactjs/utils/is'\nimport { resolveRectLike, rectToXY } from '@interactjs/utils/rect'\n\nimport type { ModifierArg, ModifierState } from '../base'\nimport { makeModifier } from '../base'\n\nexport interface Offset {\n x: number\n y: number\n index: number\n relativePoint?: Point | null\n}\n\nexport interface SnapPosition {\n x?: number\n y?: number\n range?: number\n offset?: Offset\n [index: string]: any\n}\n\nexport type SnapFunction = (\n x: number,\n y: number,\n interaction: InteractionProxy,\n offset: Offset,\n index: number,\n) => SnapPosition\nexport type SnapTarget = SnapPosition | SnapFunction\nexport interface SnapOptions {\n targets: SnapTarget[] | null\n // target range\n range: number\n // self points for snapping. [0,0] = top left, [1,1] = bottom right\n relativePoints: Point[] | null\n // startCoords = offset snapping from drag start page position\n offset: Point | RectResolvable<[Interaction]> | 'startCoords' | null\n offsetWithOrigin?: boolean\n origin: RectResolvable<[Element]> | Point | null\n endOnly?: boolean\n enabled?: boolean\n}\n\nexport type SnapState = ModifierState<\nSnapOptions,\n{\n offsets?: Offset[]\n closest?: any\n targetFields?: string[][]\n}\n>\n\nfunction start (arg: ModifierArg) {\n const { interaction, interactable, element, rect, state, startOffset } = arg\n const { options } = state\n const origin = options.offsetWithOrigin ? 
getOrigin(arg) : { x: 0, y: 0 }\n\n let snapOffset: Point\n\n if (options.offset === 'startCoords') {\n snapOffset = {\n x: interaction.coords.start.page.x,\n y: interaction.coords.start.page.y,\n }\n } else {\n const offsetRect = resolveRectLike(options.offset as any, interactable, element, [interaction])\n\n snapOffset = rectToXY(offsetRect) || { x: 0, y: 0 }\n snapOffset.x += origin.x\n snapOffset.y += origin.y\n }\n\n const { relativePoints } = options\n\n state.offsets =\n rect && relativePoints && relativePoints.length\n ? relativePoints.map((relativePoint, index) => ({\n index,\n relativePoint,\n x: startOffset.left - rect.width * relativePoint.x + snapOffset.x,\n y: startOffset.top - rect.height * relativePoint.y + snapOffset.y,\n }))\n : [\n {\n index: 0,\n relativePoint: null,\n x: snapOffset.x,\n y: snapOffset.y,\n },\n ]\n}\n\nfunction set (arg: ModifierArg) {\n const { interaction, coords, state } = arg\n const { options, offsets } = state\n\n const origin = getOriginXY(interaction.interactable, interaction.element, interaction.prepared.name)\n const page = extend({}, coords)\n const targets = []\n\n if (!options.offsetWithOrigin) {\n page.x -= origin.x\n page.y -= origin.y\n }\n\n for (const offset of offsets) {\n const relativeX = page.x - offset.x\n const relativeY = page.y - offset.y\n\n for (let index = 0, len = options.targets.length; index < len; index++) {\n const snapTarget = options.targets[index]\n let target: SnapPosition\n\n if (is.func(snapTarget)) {\n target = snapTarget(relativeX, relativeY, interaction._proxy, offset, index)\n } else {\n target = snapTarget\n }\n\n if (!target) {\n continue\n }\n\n targets.push({\n x: (is.number(target.x) ? target.x : relativeX) + offset.x,\n y: (is.number(target.y) ? target.y : relativeY) + offset.y,\n\n range: is.number(target.range) ? target.range : options.range,\n source: snapTarget,\n index,\n offset,\n })\n }\n }\n\n const closest = {\n target: null,\n inRange: false,\n distance: 0,\n range: 0,\n delta: { x: 0, y: 0 },\n }\n\n for (const target of targets) {\n const range = target.range\n const dx = target.x - page.x\n const dy = target.y - page.y\n const distance = hypot(dx, dy)\n let inRange = distance <= range\n\n // Infinite targets count as being out of range\n // compared to non infinite ones that are in range\n if (range === Infinity && closest.inRange && closest.range !== Infinity) {\n inRange = false\n }\n\n if (\n !closest.target ||\n (inRange\n ? // is the closest target in range?\n closest.inRange && range !== Infinity\n ? 
// the pointer is relatively deeper in this target\n distance / range < closest.distance / closest.range\n : // this target has Infinite range and the closest doesn't\n (range === Infinity && closest.range !== Infinity) ||\n // OR this target is closer that the previous closest\n distance < closest.distance\n : // The other is not in range and the pointer is closer to this target\n !closest.inRange && distance < closest.distance)\n ) {\n closest.target = target\n closest.distance = distance\n closest.range = range\n closest.inRange = inRange\n closest.delta.x = dx\n closest.delta.y = dy\n }\n }\n\n if (closest.inRange) {\n coords.x = closest.target.x\n coords.y = closest.target.y\n }\n\n state.closest = closest\n return closest\n}\n\nfunction getOrigin (arg: Partial>) {\n const { element } = arg.interaction\n const optionsOrigin = rectToXY(resolveRectLike(arg.state.options.origin as any, null, null, [element]))\n const origin = optionsOrigin || getOriginXY(arg.interactable, element, arg.interaction.prepared.name)\n\n return origin\n}\n\nconst defaults: SnapOptions = {\n range: Infinity,\n targets: null,\n offset: null,\n offsetWithOrigin: true,\n origin: null,\n relativePoints: null,\n endOnly: false,\n enabled: false,\n}\nconst snap = {\n start,\n set,\n defaults,\n}\n\nexport default makeModifier(snap, 'snap')\nexport { snap }\n","// This module allows snapping of the size of targets during resize\n// interactions.\n\nimport extend from '@interactjs/utils/extend'\nimport is from '@interactjs/utils/is'\n\nimport type { ModifierArg } from '../base'\nimport { makeModifier } from '../base'\n\nimport type { SnapOptions, SnapState } from './pointer'\nimport { snap } from './pointer'\n\nexport type SnapSizeOptions = Pick\n\nfunction start (arg: ModifierArg) {\n const { state, edges } = arg\n const { options } = state\n\n if (!edges) {\n return null\n }\n\n arg.state = {\n options: {\n targets: null,\n relativePoints: [\n {\n x: edges.left ? 0 : 1,\n y: edges.top ? 
0 : 1,\n },\n ],\n offset: options.offset || 'self',\n origin: { x: 0, y: 0 },\n range: options.range,\n },\n }\n\n state.targetFields = state.targetFields || [\n ['width', 'height'],\n ['x', 'y'],\n ]\n\n snap.start(arg)\n state.offsets = arg.state.offsets\n\n arg.state = state\n}\n\nfunction set (arg) {\n const { interaction, state, coords } = arg\n const { options, offsets } = state\n const relative = {\n x: coords.x - offsets[0].x,\n y: coords.y - offsets[0].y,\n }\n\n state.options = extend({}, options)\n state.options.targets = []\n\n for (const snapTarget of options.targets || []) {\n let target\n\n if (is.func(snapTarget)) {\n target = snapTarget(relative.x, relative.y, interaction)\n } else {\n target = snapTarget\n }\n\n if (!target) {\n continue\n }\n\n for (const [xField, yField] of state.targetFields) {\n if (xField in target || yField in target) {\n target.x = target[xField]\n target.y = target[yField]\n\n break\n }\n }\n\n state.options.targets.push(target)\n }\n\n const returnValue = snap.set(arg)\n\n state.options = options\n\n return returnValue\n}\n\nconst defaults: SnapSizeOptions = {\n range: Infinity,\n targets: null,\n offset: null,\n endOnly: false,\n enabled: false,\n}\n\nconst snapSize = {\n start,\n set,\n defaults,\n}\n\nexport default makeModifier(snapSize, 'snapSize')\nexport { snapSize }\n","/**\n * @module modifiers/snapEdges\n *\n * @description\n * WOW> This module allows snapping of the edges of targets during resize\n * interactions.\n *\n * ```js\n * interact(target).resizable({\n * snapEdges: {\n * targets: [interact.snappers.grid({ x: 100, y: 50 })],\n * },\n * })\n *\n * interact(target).resizable({\n * snapEdges: {\n * targets: [\n * interact.snappers.grid({\n * top: 50,\n * left: 50,\n * bottom: 100,\n * right: 100,\n * }),\n * ],\n * },\n * })\n * ```\n */\n\nimport clone from '@interactjs/utils/clone'\nimport extend from '@interactjs/utils/extend'\n\nimport type { ModifierArg, ModifierModule } from '../base'\nimport { makeModifier } from '../base'\n\nimport type { SnapOptions, SnapState } from './pointer'\nimport { snapSize } from './size'\n\nexport type SnapEdgesOptions = Pick\n\nfunction start (arg: ModifierArg) {\n const { edges } = arg\n\n if (!edges) {\n return null\n }\n\n arg.state.targetFields = arg.state.targetFields || [\n [edges.left ? 'left' : 'right', edges.top ? 
'top' : 'bottom'],\n ]\n\n return snapSize.start(arg)\n}\n\nconst snapEdges: ModifierModule> = {\n start,\n set: snapSize.set,\n defaults: extend(clone(snapSize.defaults), {\n targets: null,\n range: null,\n offset: { x: 0, y: 0 },\n } as const),\n}\n\nexport default makeModifier(snapEdges, 'snapEdges')\nexport { snapEdges }\n","/* eslint-disable node/no-extraneous-import, import/no-unresolved */\nimport aspectRatio from './aspectRatio'\nimport avoid from './avoid/avoid'\nimport restrictEdges from './restrict/edges'\nimport restrict from './restrict/pointer'\nimport restrictRect from './restrict/rect'\nimport restrictSize from './restrict/size'\nimport rubberband from './rubberband/rubberband'\nimport snapEdges from './snap/edges'\nimport snap from './snap/pointer'\nimport snapSize from './snap/size'\nimport spring from './spring/spring'\nimport transform from './transform/transform'\n\nexport default {\n aspectRatio,\n restrictEdges,\n restrict,\n restrictRect,\n restrictSize,\n snapEdges,\n snap,\n snapSize,\n\n spring,\n avoid,\n transform,\n rubberband,\n}\n","import type { Plugin } from '@interactjs/core/scope'\nimport snappers from '@interactjs/snappers/plugin'\n\nimport all from './all'\nimport base from './base'\n\ndeclare module '@interactjs/core/InteractStatic' {\n export interface InteractStatic {\n modifiers: typeof all\n }\n}\n\nconst modifiers: Plugin = {\n id: 'modifiers',\n install (scope) {\n const { interactStatic: interact } = scope\n\n scope.usePlugin(base)\n scope.usePlugin(snappers)\n\n interact.modifiers = all\n\n // for backwrads compatibility\n for (const type in all) {\n const { _defaults, _methods } = all[type as keyof typeof all]\n\n ;(_defaults as any)._methods = _methods\n ;(scope.defaults.perAction as any)[type] = _defaults\n }\n },\n}\n\nexport default modifiers\n","import { BaseEvent } from '@interactjs/core/BaseEvent'\nimport type Interaction from '@interactjs/core/Interaction'\nimport type { PointerEventType, PointerType, Point } from '@interactjs/types/index'\nimport * as pointerUtils from '@interactjs/utils/pointerUtils'\n\nexport default class PointerEvent extends BaseEvent {\n type: T\n originalEvent: PointerEventType\n pointerId: number\n pointerType: string\n double: boolean\n pageX: number\n pageY: number\n clientX: number\n clientY: number\n dt: number\n eventable: any;\n [key: string]: any\n\n /** */\n constructor (\n type: T,\n pointer: PointerType | PointerEvent,\n event: PointerEventType,\n eventTarget: Node,\n interaction: Interaction,\n timeStamp: number,\n ) {\n super(interaction)\n pointerUtils.pointerExtend(this, event)\n\n if (event !== pointer) {\n pointerUtils.pointerExtend(this, pointer)\n }\n\n this.timeStamp = timeStamp\n this.originalEvent = event\n this.type = type\n this.pointerId = pointerUtils.getPointerId(pointer)\n this.pointerType = pointerUtils.getPointerType(pointer)\n this.target = eventTarget\n this.currentTarget = null\n\n if (type === 'tap') {\n const pointerIndex = interaction.getPointerIndex(pointer)\n this.dt = this.timeStamp - interaction.pointers[pointerIndex].downTime\n\n const interval = this.timeStamp - interaction.tapTime\n\n this.double = !!(\n interaction.prevTap &&\n interaction.prevTap.type !== 'doubletap' &&\n interaction.prevTap.target === this.target &&\n interval < 500\n )\n } else if (type === 'doubletap') {\n this.dt = (pointer as PointerEvent<'tap'>).timeStamp - interaction.tapTime\n }\n }\n\n _subtractOrigin ({ x: originX, y: originY }: Point) {\n this.pageX -= originX\n this.pageY -= originY\n 
this.clientX -= originX\n this.clientY -= originY\n\n return this\n }\n\n _addOrigin ({ x: originX, y: originY }: Point) {\n this.pageX += originX\n this.pageY += originY\n this.clientX += originX\n this.clientY += originY\n\n return this\n }\n\n /**\n * Prevent the default behaviour of the original Event\n */\n preventDefault () {\n this.originalEvent.preventDefault()\n }\n}\n\nexport { PointerEvent }\n","import type { Eventable } from '@interactjs/core/Eventable'\nimport type { Interaction } from '@interactjs/core/Interaction'\nimport type { PerActionDefaults } from '@interactjs/core/options'\nimport type { Scope, SignalArgs, Plugin } from '@interactjs/core/scope'\nimport type { Point, PointerType, PointerEventType, Element } from '@interactjs/types'\nimport * as domUtils from '@interactjs/utils/domUtils'\nimport extend from '@interactjs/utils/extend'\nimport getOriginXY from '@interactjs/utils/getOriginXY'\n\nimport { PointerEvent } from './PointerEvent'\n\nexport type EventTargetList = Array<{\n node: Node\n eventable: Eventable\n props: { [key: string]: any }\n}>\n\nexport interface PointerEventOptions extends PerActionDefaults {\n enabled?: undefined // not used\n holdDuration?: number\n ignoreFrom?: any\n allowFrom?: any\n origin?: Point | string | Element\n}\n\ndeclare module '@interactjs/core/scope' {\n interface Scope {\n pointerEvents: typeof pointerEvents\n }\n}\n\ndeclare module '@interactjs/core/Interaction' {\n interface Interaction {\n prevTap?: PointerEvent\n tapTime?: number\n }\n}\n\ndeclare module '@interactjs/core/PointerInfo' {\n interface PointerInfo {\n hold?: {\n duration: number\n timeout: any\n }\n }\n}\n\ndeclare module '@interactjs/core/options' {\n interface ActionDefaults {\n pointerEvents: Options\n }\n}\n\ndeclare module '@interactjs/core/scope' {\n interface SignalArgs {\n 'pointerEvents:new': { pointerEvent: PointerEvent }\n 'pointerEvents:fired': {\n interaction: Interaction\n pointer: PointerType | PointerEvent\n event: PointerEventType | PointerEvent\n eventTarget: Node\n pointerEvent: PointerEvent\n targets?: EventTargetList\n type: string\n }\n 'pointerEvents:collect-targets': {\n interaction: Interaction\n pointer: PointerType | PointerEvent\n event: PointerEventType | PointerEvent\n eventTarget: Node\n targets?: EventTargetList\n type: string\n path: Node[]\n node: null\n }\n }\n}\n\nconst defaults: PointerEventOptions = {\n holdDuration: 600,\n ignoreFrom: null,\n allowFrom: null,\n origin: { x: 0, y: 0 },\n}\n\nconst pointerEvents: Plugin = {\n id: 'pointer-events/base',\n before: ['inertia', 'modifiers', 'auto-start', 'actions'],\n install,\n listeners: {\n 'interactions:new': addInteractionProps,\n 'interactions:update-pointer': addHoldInfo,\n 'interactions:move': moveAndClearHold,\n 'interactions:down': (arg, scope) => {\n downAndStartHold(arg, scope)\n fire(arg, scope)\n },\n 'interactions:up': (arg, scope) => {\n clearHold(arg)\n fire(arg, scope)\n tapAfterUp(arg, scope)\n },\n 'interactions:cancel': (arg, scope) => {\n clearHold(arg)\n fire(arg, scope)\n },\n },\n PointerEvent,\n fire,\n collectEventTargets,\n defaults,\n types: {\n down: true,\n move: true,\n up: true,\n cancel: true,\n tap: true,\n doubletap: true,\n hold: true,\n } as { [type: string]: true },\n}\n\nfunction fire (\n arg: {\n pointer: PointerType | PointerEvent\n event: PointerEventType | PointerEvent\n eventTarget: Node\n interaction: Interaction\n type: T\n targets?: EventTargetList\n },\n scope: Scope,\n) {\n const { interaction, pointer, event, eventTarget, type, 
targets = collectEventTargets(arg, scope) } = arg\n\n const pointerEvent = new PointerEvent(type, pointer, event, eventTarget, interaction, scope.now())\n\n scope.fire('pointerEvents:new', { pointerEvent })\n\n const signalArg = {\n interaction,\n pointer,\n event,\n eventTarget,\n targets,\n type,\n pointerEvent,\n }\n\n for (let i = 0; i < targets.length; i++) {\n const target = targets[i]\n\n for (const prop in target.props || {}) {\n ;(pointerEvent as any)[prop] = target.props[prop]\n }\n\n const origin = getOriginXY(target.eventable, target.node)\n\n pointerEvent._subtractOrigin(origin)\n pointerEvent.eventable = target.eventable\n pointerEvent.currentTarget = target.node\n\n target.eventable.fire(pointerEvent)\n\n pointerEvent._addOrigin(origin)\n\n if (\n pointerEvent.immediatePropagationStopped ||\n (pointerEvent.propagationStopped &&\n i + 1 < targets.length &&\n targets[i + 1].node !== pointerEvent.currentTarget)\n ) {\n break\n }\n }\n\n scope.fire('pointerEvents:fired', signalArg)\n\n if (type === 'tap') {\n // if pointerEvent should make a double tap, create and fire a doubletap\n // PointerEvent and use that as the prevTap\n const prevTap = pointerEvent.double\n ? fire(\n {\n interaction,\n pointer,\n event,\n eventTarget,\n type: 'doubletap',\n },\n scope,\n )\n : pointerEvent\n\n interaction.prevTap = prevTap\n interaction.tapTime = prevTap.timeStamp\n }\n\n return pointerEvent\n}\n\nfunction collectEventTargets (\n {\n interaction,\n pointer,\n event,\n eventTarget,\n type,\n }: {\n interaction: Interaction\n pointer: PointerType | PointerEvent\n event: PointerEventType | PointerEvent\n eventTarget: Node\n type: T\n },\n scope: Scope,\n) {\n const pointerIndex = interaction.getPointerIndex(pointer)\n const pointerInfo = interaction.pointers[pointerIndex]\n\n // do not fire a tap event if the pointer was moved before being lifted\n if (\n type === 'tap' &&\n (interaction.pointerWasMoved ||\n // or if the pointerup target is different to the pointerdown target\n !(pointerInfo && pointerInfo.downTarget === eventTarget))\n ) {\n return []\n }\n\n const path = domUtils.getPath(eventTarget as Element | Document)\n const signalArg = {\n interaction,\n pointer,\n event,\n eventTarget,\n type,\n path,\n targets: [] as EventTargetList,\n node: null,\n }\n\n for (const node of path) {\n signalArg.node = node\n\n scope.fire('pointerEvents:collect-targets', signalArg)\n }\n\n if (type === 'hold') {\n signalArg.targets = signalArg.targets.filter(\n (target) => target.eventable.options.holdDuration === interaction.pointers[pointerIndex]?.hold.duration,\n )\n }\n\n return signalArg.targets\n}\n\nfunction addInteractionProps ({ interaction }) {\n interaction.prevTap = null // the most recent tap event on this interaction\n interaction.tapTime = 0 // time of the most recent tap event\n}\n\nfunction addHoldInfo ({ down, pointerInfo }: SignalArgs['interactions:update-pointer']) {\n if (!down && pointerInfo.hold) {\n return\n }\n\n pointerInfo.hold = { duration: Infinity, timeout: null }\n}\n\nfunction clearHold ({ interaction, pointerIndex }) {\n const hold = interaction.pointers[pointerIndex].hold\n\n if (hold && hold.timeout) {\n clearTimeout(hold.timeout)\n hold.timeout = null\n }\n}\n\nfunction moveAndClearHold (arg: SignalArgs['interactions:move'], scope: Scope) {\n const { interaction, pointer, event, eventTarget, duplicate } = arg\n\n if (!duplicate && (!interaction.pointerIsDown || interaction.pointerWasMoved)) {\n if (interaction.pointerIsDown) {\n clearHold(arg)\n }\n\n fire(\n {\n 
interaction,\n pointer,\n event,\n eventTarget: eventTarget as Element,\n type: 'move',\n },\n scope,\n )\n }\n}\n\nfunction downAndStartHold (\n { interaction, pointer, event, eventTarget, pointerIndex }: SignalArgs['interactions:down'],\n scope: Scope,\n) {\n const timer = interaction.pointers[pointerIndex].hold\n const path = domUtils.getPath(eventTarget as Element | Document)\n const signalArg = {\n interaction,\n pointer,\n event,\n eventTarget,\n type: 'hold',\n targets: [] as EventTargetList,\n path,\n node: null,\n }\n\n for (const node of path) {\n signalArg.node = node\n\n scope.fire('pointerEvents:collect-targets', signalArg)\n }\n\n if (!signalArg.targets.length) return\n\n let minDuration = Infinity\n\n for (const target of signalArg.targets) {\n const holdDuration = target.eventable.options.holdDuration\n\n if (holdDuration < minDuration) {\n minDuration = holdDuration\n }\n }\n\n timer.duration = minDuration\n timer.timeout = setTimeout(() => {\n fire(\n {\n interaction,\n eventTarget,\n pointer,\n event,\n type: 'hold',\n },\n scope,\n )\n }, minDuration)\n}\n\nfunction tapAfterUp (\n { interaction, pointer, event, eventTarget }: SignalArgs['interactions:up'],\n scope: Scope,\n) {\n if (!interaction.pointerWasMoved) {\n fire({ interaction, eventTarget, pointer, event, type: 'tap' }, scope)\n }\n}\n\nfunction install (scope: Scope) {\n scope.pointerEvents = pointerEvents\n scope.defaults.actions.pointerEvents = pointerEvents.defaults\n extend(scope.actions.phaselessTypes, pointerEvents.types)\n}\n\nexport default pointerEvents\n","import type Interaction from '@interactjs/core/Interaction'\nimport type { ListenerMap, Scope, SignalArgs, Plugin } from '@interactjs/core/scope'\n\nimport type PointerEvent from './PointerEvent'\nimport basePlugin from './base'\n\ndeclare module '@interactjs/core/Interaction' {\n interface Interaction {\n holdIntervalHandle?: any\n }\n}\n\ndeclare module '@interactjs/pointer-events/PointerEvent' {\n interface PointerEvent {\n count?: number\n }\n}\n\ndeclare module '@interactjs/pointer-events/base' {\n interface PointerEventOptions {\n holdRepeatInterval?: number\n }\n}\n\nfunction install (scope: Scope) {\n scope.usePlugin(basePlugin)\n\n const { pointerEvents } = scope\n\n // don't repeat by default\n pointerEvents.defaults.holdRepeatInterval = 0\n pointerEvents.types.holdrepeat = scope.actions.phaselessTypes.holdrepeat = true\n}\n\nfunction onNew ({ pointerEvent }: { pointerEvent: PointerEvent }) {\n if (pointerEvent.type !== 'hold') return\n\n pointerEvent.count = (pointerEvent.count || 0) + 1\n}\n\nfunction onFired (\n { interaction, pointerEvent, eventTarget, targets }: SignalArgs['pointerEvents:fired'],\n scope: Scope,\n) {\n if (pointerEvent.type !== 'hold' || !targets.length) return\n\n // get the repeat interval from the first eventable\n const interval = targets[0].eventable.options.holdRepeatInterval\n\n // don't repeat if the interval is 0 or less\n if (interval <= 0) return\n\n // set a timeout to fire the holdrepeat event\n interaction.holdIntervalHandle = setTimeout(() => {\n scope.pointerEvents.fire(\n {\n interaction,\n eventTarget,\n type: 'hold',\n pointer: pointerEvent,\n event: pointerEvent,\n },\n scope,\n )\n }, interval)\n}\n\nfunction endHoldRepeat ({ interaction }: { interaction: Interaction }) {\n // set the interaction's holdStopTime property\n // to stop further holdRepeat events\n if (interaction.holdIntervalHandle) {\n clearInterval(interaction.holdIntervalHandle)\n interaction.holdIntervalHandle = null\n 
}\n}\n\nconst holdRepeat: Plugin = {\n id: 'pointer-events/holdRepeat',\n install,\n listeners: ['move', 'up', 'cancel', 'endall'].reduce(\n (acc, enderTypes) => {\n ;(acc as any)[`pointerEvents:${enderTypes}`] = endHoldRepeat\n return acc\n },\n {\n 'pointerEvents:new': onNew,\n 'pointerEvents:fired': onFired,\n } as ListenerMap,\n ),\n}\n\nexport default holdRepeat\n","import type { Interactable } from '@interactjs/core/Interactable'\nimport type { Scope, Plugin } from '@interactjs/core/scope'\nimport type { Element } from '@interactjs/types/index'\nimport extend from '@interactjs/utils/extend'\n\ndeclare module '@interactjs/core/Interactable' {\n interface Interactable {\n pointerEvents: typeof pointerEventsMethod\n __backCompatOption: (optionName: string, newValue: any) => any\n }\n}\n\nfunction install (scope: Scope) {\n const { Interactable } = scope\n\n Interactable.prototype.pointerEvents = pointerEventsMethod\n\n const __backCompatOption = Interactable.prototype._backCompatOption\n\n Interactable.prototype._backCompatOption = function (optionName, newValue) {\n const ret = __backCompatOption.call(this, optionName, newValue)\n\n if (ret === this) {\n this.events.options[optionName] = newValue\n }\n\n return ret\n }\n}\n\nfunction pointerEventsMethod (this: Interactable, options: any) {\n extend(this.events.options, options)\n\n return this\n}\n\nconst plugin: Plugin = {\n id: 'pointer-events/interactableTargets',\n install,\n listeners: {\n 'pointerEvents:collect-targets': ({ targets, node, type, eventTarget }, scope) => {\n scope.interactables.forEachMatch(node, (interactable: Interactable) => {\n const eventable = interactable.events\n const options = eventable.options\n\n if (\n eventable.types[type] &&\n eventable.types[type].length &&\n interactable.testIgnoreAllow(options, node, eventTarget)\n ) {\n targets.push({\n node,\n eventable,\n props: { interactable },\n })\n }\n })\n },\n\n 'interactable:new': ({ interactable }) => {\n interactable.events.getRect = function (element: Element) {\n return interactable.getRect(element)\n }\n },\n\n 'interactable:set': ({ interactable, options }, scope) => {\n extend(interactable.events.options, scope.pointerEvents.defaults)\n extend(interactable.events.options, options.pointerEvents || {})\n },\n },\n}\n\nexport default plugin\n","import type { Plugin } from '@interactjs/core/scope'\n\nimport * as pointerEvents from './base'\nimport holdRepeat from './holdRepeat'\nimport interactableTargets from './interactableTargets'\n\nconst plugin: Plugin = {\n id: 'pointer-events',\n install (scope) {\n scope.usePlugin(pointerEvents)\n scope.usePlugin(holdRepeat)\n scope.usePlugin(interactableTargets)\n },\n}\n\nexport default plugin\n","import type { Interactable } from '@interactjs/core/Interactable'\nimport type { ActionProps, DoAnyPhaseArg, Interaction } from '@interactjs/core/Interaction'\nimport type { ActionName, Scope, Plugin } from '@interactjs/core/scope'\nimport type { Element } from '@interactjs/types/index'\nimport * as arr from '@interactjs/utils/arr'\nimport is from '@interactjs/utils/is'\nimport { copyAction } from '@interactjs/utils/misc'\nimport * as pointerUtils from '@interactjs/utils/pointerUtils'\nimport { tlbrToXywh } from '@interactjs/utils/rect'\n\ndeclare module '@interactjs/core/scope' {\n interface SignalArgs {\n 'interactions:before-action-reflow': Omit\n 'interactions:action-reflow': DoAnyPhaseArg\n 'interactions:after-action-reflow': DoAnyPhaseArg\n }\n}\n\ndeclare module '@interactjs/core/Interactable' {\n interface 
Interactable {\n reflow: (action: ActionProps) => ReturnType\n }\n}\n\ndeclare module '@interactjs/core/Interaction' {\n interface Interaction {\n _reflowPromise: Promise\n _reflowResolve: (...args: unknown[]) => void\n }\n}\n\ndeclare module '@interactjs/core/InteractEvent' {\n interface PhaseMap {\n reflow?: true\n }\n}\n\nexport function install (scope: Scope) {\n const {\n /** @lends Interactable */\n Interactable,\n } = scope\n\n scope.actions.phases.reflow = true\n\n /**\n * ```js\n * const interactable = interact(target)\n * const drag = { name: drag, axis: 'x' }\n * const resize = { name: resize, edges: { left: true, bottom: true }\n *\n * interactable.reflow(drag)\n * interactable.reflow(resize)\n * ```\n *\n * Start an action sequence to re-apply modifiers, check drops, etc.\n *\n * @param { Object } action The action to begin\n * @param { string } action.name The name of the action\n * @returns { Promise } A promise that resolves to the `Interactable` when actions on all targets have ended\n */\n Interactable.prototype.reflow = function (action: ActionProps) {\n return doReflow(this, action, scope)\n }\n}\n\nfunction doReflow (\n interactable: Interactable,\n action: ActionProps,\n scope: Scope,\n): Promise {\n const elements = (is.string(interactable.target)\n ? arr.from(interactable._context.querySelectorAll(interactable.target))\n : [interactable.target]) as Element[]\n\n // tslint:disable-next-line variable-name\n const Promise = (scope.window as any).Promise\n const promises: Array> | null = Promise ? [] : null\n\n for (const element of elements) {\n const rect = interactable.getRect(element as HTMLElement | SVGElement)\n\n if (!rect) {\n break\n }\n\n const runningInteraction = arr.find(scope.interactions.list, (interaction: Interaction) => {\n return (\n interaction.interacting() &&\n interaction.interactable === interactable &&\n interaction.element === element &&\n interaction.prepared.name === action.name\n )\n })\n let reflowPromise: Promise\n\n if (runningInteraction) {\n runningInteraction.move()\n\n if (promises) {\n reflowPromise =\n runningInteraction._reflowPromise ||\n new Promise((resolve: any) => {\n runningInteraction._reflowResolve = resolve\n })\n }\n } else {\n const xywh = tlbrToXywh(rect)\n const coords = {\n page: { x: xywh.x, y: xywh.y },\n client: { x: xywh.x, y: xywh.y },\n timeStamp: scope.now(),\n }\n\n const event = pointerUtils.coordsToEvent(coords)\n reflowPromise = startReflow(scope, interactable, element, action, event)\n }\n\n if (promises) {\n promises.push(reflowPromise)\n }\n }\n\n return promises && Promise.all(promises).then(() => interactable)\n}\n\nfunction startReflow (\n scope: Scope,\n interactable: Interactable,\n element: Element,\n action: ActionProps,\n event: any,\n) {\n const interaction = scope.interactions.new({ pointerType: 'reflow' })\n const signalArg = {\n interaction,\n event,\n pointer: event,\n eventTarget: element,\n phase: 'reflow',\n } as const\n\n interaction.interactable = interactable\n interaction.element = element\n interaction.prevEvent = event\n interaction.updatePointer(event, event, element, true)\n pointerUtils.setZeroCoords(interaction.coords.delta)\n\n copyAction(interaction.prepared, action)\n interaction._doPhase(signalArg)\n\n const { Promise } = (scope.window as unknown) as { Promise: PromiseConstructor }\n const reflowPromise = Promise\n ? 
new Promise((resolve) => {\n interaction._reflowResolve = resolve\n })\n : undefined\n\n interaction._reflowPromise = reflowPromise\n interaction.start(action, interactable, element)\n\n if (interaction._interacting) {\n interaction.move(signalArg)\n interaction.end(event)\n } else {\n interaction.stop()\n interaction._reflowResolve()\n }\n\n interaction.removePointer(event, event)\n\n return reflowPromise\n}\n\nconst reflow: Plugin = {\n id: 'reflow',\n install,\n listeners: {\n // remove completed reflow interactions\n 'interactions:stop': ({ interaction }, scope) => {\n if (interaction.pointerType === 'reflow') {\n if (interaction._reflowResolve) {\n interaction._reflowResolve()\n }\n\n arr.remove(scope.interactions.list, interaction)\n }\n },\n },\n}\n\nexport default reflow\n","import actions from '@interactjs/actions/plugin'\nimport autoScroll from '@interactjs/auto-scroll/plugin'\nimport autoStart from '@interactjs/auto-start/plugin'\nimport interactablePreventDefault from '@interactjs/core/interactablePreventDefault'\nimport devTools from '@interactjs/dev-tools/plugin'\nimport inertia from '@interactjs/inertia/plugin'\nimport interact from '@interactjs/interact'\nimport modifiers from '@interactjs/modifiers/plugin'\nimport offset from '@interactjs/offset/plugin'\nimport pointerEvents from '@interactjs/pointer-events/plugin'\nimport reflow from '@interactjs/reflow/plugin'\n\ninteract.use(interactablePreventDefault)\n\ninteract.use(offset)\n\n// pointerEvents\ninteract.use(pointerEvents)\n\n// inertia\ninteract.use(inertia)\n\n// snap, resize, etc.\ninteract.use(modifiers)\n\n// autoStart, hold\ninteract.use(autoStart)\n\n// drag and drop, resize, gesture\ninteract.use(actions)\n\n// autoScroll\ninteract.use(autoScroll)\n\n// reflow\ninteract.use(reflow)\n\n// eslint-disable-next-line no-undef\nif (process.env.NODE_ENV !== 'production') {\n interact.use(devTools)\n}\n\nexport default interact\n\nif (typeof module === 'object' && !!module) {\n try {\n module.exports = interact\n } catch {}\n}\n\n;(interact as any).default = interact\n","// eslint-disable-next-line import/no-extraneous-dependencies\nimport interact from '@interactjs/interactjs/index'\n\nexport default interact\n\nif (typeof module === 'object' && !!module) {\n try {\n module.exports = interact\n } catch {}\n}\n\n;(interact as any).default = interact\n","\nreturn _$index_79;\n\n});\n"]}
diff --git a/src/main/resources/static/js/thirdParty/pdf-lib.min.js.map b/src/main/resources/static/js/thirdParty/pdf-lib.min.js.map
new file mode 100644
index 000000000..84106f907
--- /dev/null
+++ b/src/main/resources/static/js/thirdParty/pdf-lib.min.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"pdf-lib.min.js","sources":["../node_modules/tslib/tslib.es6.js","../es/utils/base64.js","../es/utils/unicode.js","../es/utils/strings.js","../es/utils/arrays.js","../es/utils/async.js","../es/utils/numbers.js","../es/utils/errors.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/utils/common.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/trees.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/adler32.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/crc32.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/deflate.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/messages.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/utils/strings.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/zstream.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/deflate.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/inffast.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/inftrees.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/inflate.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/constants.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/zlib/gzheader.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/lib/inflate.js","../node_modules/@pdf-lib/standard-fonts/node_modules/pako/index.js","../node_modules/@pdf-lib/standard-fonts/es/utils.js","../node_modules/@pdf-lib/standard-fonts/es/Font.js","../es/utils/pdfDocEncoding.js","../node_modules/@pdf-lib/standard-fonts/es/Encoding.js","../es/utils/objects.js","../es/utils/validators.js","../es/core/syntax/CharCodes.js","../es/utils/Cache.js","../es/core/errors.js","../node_modules/pako/lib/utils/common.js","../node_modules/pako/lib/zlib/trees.js","../node_modules/pako/lib/zlib/adler32.js","../node_modules/pako/lib/zlib/crc32.js","../node_modules/pako/lib/zlib/deflate.js","../node_modules/pako/lib/zlib/messages.js","../node_modules/pako/lib/utils/strings.js","../node_modules/pako/lib/zlib/zstream.js","../node_modules/pako/lib/deflate.js","../node_modules/pako/lib/zlib/inffast.js","../node_modules/pako/lib/zlib/inftrees.js","../node_modules/pako/lib/zlib/inflate.js","../node_modules/pako/lib/zlib/constants.js","../node_modules/pako/lib/zlib/gzheader.js","../node_modules/pako/lib/inflate.js","../node_modules/pako/index.js","../es/core/document/PDFHeader.js","../es/core/objects/PDFObject.js","../es/core/objects/PDFNumber.js","../es/core/objects/PDFArray.js","../es/core/objects/PDFBool.js","../es/core/syntax/Delimiters.js","../es/core/syntax/Whitespace.js","../es/core/syntax/Irregular.js","../es/core/objects/PDFName.js","../es/core/operators/PDFOperatorNames.js","../es/core/objects/PDFNull.js","../es/core/objects/PDFDict.js","../es/core/objects/PDFStream.js","../es/core/objects/PDFRawStream.js","../es/core/objects/PDFRef.js","../es/core/operators/PDFOperator.js","../es/core/structures/PDFCrossRefStream.js","../es/core/structures/PDFFlateStream.js","../es/core/structures/PDFContentStream.js","../es/utils/rng.js","../es/core/PDFContext.js","../es/core/structures/PDFPageLeaf.js","../es/core/PDFObjectCopier.js","../es/core/document/PDFCrossRefSection.js","../es/core/document/PDFTrailer.js","../es/core/document/PDFTrailerDict.js","../es/core/structures/PDFObjectStream.js","../es/core/writers/PDFWriter.js","../es/core/objects/PDFInvalidObject.js","../e
s/core/embedders/FileEmbedder.js","../es/core/writers/PDFStreamWriter.js","../es/core/objects/PDFHexString.js","../es/core/embedders/StandardFontEmbedder.js","../es/core/embedders/CMap.js","../es/core/objects/PDFString.js","../es/core/embedders/CustomFontEmbedder.js","../es/core/embedders/FontFlags.js","../es/core/embedders/CustomFontSubsetEmbedder.js","../es/core/embedders/JpegEmbedder.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/utils/common.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/trees.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/adler32.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/crc32.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/deflate.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/messages.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/utils/strings.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/zstream.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/deflate.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/inffast.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/inftrees.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/inflate.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/constants.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/zlib/gzheader.js","../node_modules/@pdf-lib/upng/node_modules/pako/lib/inflate.js","../node_modules/@pdf-lib/upng/node_modules/pako/index.js","../node_modules/@pdf-lib/upng/UPNG.js","../es/utils/png.js","../es/core/interactive/ViewerPreferences.js","../es/core/embedders/PngEmbedder.js","../es/core/streams/Stream.js","../es/core/streams/DecodeStream.js","../es/core/streams/Ascii85Stream.js","../es/core/streams/AsciiHexStream.js","../es/core/streams/FlateStream.js","../es/core/streams/LZWStream.js","../es/core/streams/RunLengthStream.js","../es/core/streams/decode.js","../es/core/embedders/PDFPageEmbedder.js","../es/core/acroform/flags.js","../es/core/acroform/PDFAcroField.js","../es/core/annotation/BorderStyle.js","../es/core/annotation/PDFAnnotation.js","../es/core/annotation/AppearanceCharacteristics.js","../es/core/annotation/PDFWidgetAnnotation.js","../es/core/acroform/PDFAcroTerminal.js","../es/core/acroform/PDFAcroButton.js","../es/core/acroform/PDFAcroCheckBox.js","../es/core/acroform/PDFAcroChoice.js","../es/core/acroform/PDFAcroComboBox.js","../es/core/acroform/PDFAcroNonTerminal.js","../es/core/acroform/PDFAcroSignature.js","../es/core/acroform/PDFAcroText.js","../es/core/acroform/PDFAcroPushButton.js","../es/core/acroform/PDFAcroRadioButton.js","../es/core/acroform/PDFAcroListBox.js","../es/core/acroform/utils.js","../es/core/acroform/PDFAcroForm.js","../es/core/structures/PDFCatalog.js","../es/core/structures/PDFPageTree.js","../es/core/syntax/Numeric.js","../es/core/parser/BaseParser.js","../es/core/annotation/flags.js","../es/core/parser/ByteStream.js","../es/core/syntax/Keywords.js","../es/core/parser/PDFObjectParser.js","../es/core/parser/PDFObjectStreamParser.js","../es/core/parser/PDFXRefStreamParser.js","../es/core/parser/PDFParser.js","../es/api/rotations.js","../es/api/objects.js","../es/api/operators.js","../es/api/colors.js","../es/api/text/alignment.js","../es/api/svgPath.js","../es/api/operations.js","../es/api/errors.js","../es/api/text/layout.js","../es/api/image/alignment.js","../es/api/form/appearances.js","../es/api/PDFEmbeddedPage.js","../es/api/PDFFont.js","../es/api/PDFImage.js","../es/api/form/PDFField.js","../es/api/StandardFonts.
js","../es/api/form/PDFCheckBox.js","../es/api/form/PDFDropdown.js","../es/api/form/PDFOptionList.js","../es/api/form/PDFRadioGroup.js","../es/api/form/PDFSignature.js","../es/api/form/PDFTextField.js","../es/api/PDFDocumentOptions.js","../es/api/form/PDFForm.js","../es/api/sizes.js","../es/api/PDFEmbeddedFile.js","../es/api/PDFPageOptions.js","../es/api/PDFJavaScript.js","../es/core/embedders/JavaScriptEmbedder.js","../es/api/PDFDocument.js","../es/api/PDFPage.js","../es/api/form/PDFButton.js"],"sourcesContent":["/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation. All rights reserved.\r\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use\r\nthis file except in compliance with the License. You may obtain a copy of the\r\nLicense at http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nTHIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\nKIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED\r\nWARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,\r\nMERCHANTABLITY OR NON-INFRINGEMENT.\r\n\r\nSee the Apache Version 2.0 License for specific language governing permissions\r\nand limitations under the License.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport function __exportStar(m, exports) {\r\n for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p];\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n};\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];\r\n result.default = mod;\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, privateMap) {\r\n if (!privateMap.has(receiver)) {\r\n throw new TypeError(\"attempted to get private field on non-instance\");\r\n }\r\n return privateMap.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, privateMap, value) {\r\n if (!privateMap.has(receiver)) {\r\n throw new TypeError(\"attempted to set private field on non-instance\");\r\n }\r\n privateMap.set(receiver, value);\r\n return value;\r\n}\r\n","/*\n * The `chars`, `lookup`, `encode`, and `decode` members of this file are\n * licensed under the following:\n *\n * base64-arraybuffer\n * https://github.com/niklasvh/base64-arraybuffer\n *\n * Copyright (c) 2012 Niklas von Hertzen\n * Licensed under the MIT license.\n *\n */\nvar chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';\n// Use a lookup table to find the index.\nvar lookup = new Uint8Array(256);\nfor (var i = 0; i < chars.length; i++) {\n lookup[chars.charCodeAt(i)] = i;\n}\nexport var encodeToBase64 = function (bytes) {\n var base64 = '';\n var len = bytes.length;\n for (var i = 0; i < len; i += 3) {\n base64 += chars[bytes[i] >> 2];\n base64 += chars[((bytes[i] & 3) << 4) | (bytes[i + 1] >> 4)];\n base64 += chars[((bytes[i + 1] & 15) << 2) | (bytes[i + 2] >> 6)];\n base64 += chars[bytes[i + 2] & 63];\n }\n if (len % 3 === 2) {\n base64 = base64.substring(0, base64.length - 1) + '=';\n }\n else if (len % 3 === 1) {\n base64 = base64.substring(0, base64.length - 2) + '==';\n }\n return base64;\n};\nexport var decodeFromBase64 = function (base64) {\n var bufferLength = base64.length * 0.75;\n var len = base64.length;\n var i;\n var p = 0;\n var encoded1;\n var encoded2;\n var encoded3;\n var encoded4;\n if (base64[base64.length - 1] === '=') {\n bufferLength--;\n if (base64[base64.length - 2] === '=') {\n bufferLength--;\n }\n }\n var bytes = new Uint8Array(bufferLength);\n for (i = 0; i < len; i += 4) {\n encoded1 = lookup[base64.charCodeAt(i)];\n encoded2 = lookup[base64.charCodeAt(i + 1)];\n encoded3 = lookup[base64.charCodeAt(i + 2)];\n encoded4 = lookup[base64.charCodeAt(i + 3)];\n bytes[p++] = (encoded1 << 2) | (encoded2 >> 4);\n bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2);\n bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63);\n }\n return bytes;\n};\n// This regex is designed to be as flexible as possible. 
It will parse certain\n// invalid data URIs.\nvar DATA_URI_PREFIX_REGEX = /^(data)?:?([\\w\\/\\+]+)?;?(charset=[\\w-]+|base64)?.*,/i;\n/**\n * If the `dataUri` input is a data URI, then the data URI prefix must not be\n * longer than 100 characters, or this function will fail to decode it.\n *\n * @param dataUri a base64 data URI or plain base64 string\n * @returns a Uint8Array containing the decoded input\n */\nexport var decodeFromBase64DataUri = function (dataUri) {\n var trimmedUri = dataUri.trim();\n var prefix = trimmedUri.substring(0, 100);\n var res = prefix.match(DATA_URI_PREFIX_REGEX);\n // Assume it's not a data URI - just a plain base64 string\n if (!res)\n return decodeFromBase64(trimmedUri);\n // Remove the data URI prefix and parse the remainder as a base64 string\n var fullMatch = res[0];\n var data = trimmedUri.substring(fullMatch.length);\n return decodeFromBase64(data);\n};\n//# sourceMappingURL=base64.js.map","import { toHexString } from \"./strings\";\n/**\n * Encodes a string to UTF-8.\n *\n * @param input The string to be encoded.\n * @param byteOrderMark Whether or not a byte order marker (BOM) should be added\n * to the start of the encoding. (default `true`)\n * @returns A Uint8Array containing the UTF-8 encoding of the input string.\n *\n * -----------------------------------------------------------------------------\n *\n * JavaScript strings are composed of Unicode code points. Code points are\n * integers in the range 0 to 1,114,111 (0x10FFFF). When serializing a string,\n * it must be encoded as a sequence of words. A word is typically 8, 16, or 32\n * bytes in size. As such, Unicode defines three encoding forms: UTF-8, UTF-16,\n * and UTF-32. These encoding forms are described in the Unicode standard [1].\n * This function implements the UTF-8 encoding form.\n *\n * -----------------------------------------------------------------------------\n *\n * In UTF-8, each code point is mapped to a sequence of 1, 2, 3, or 4 bytes.\n * Note that the logic which defines this mapping is slightly convoluted, and\n * not as straightforward as the mapping logic for UTF-16 or UTF-32. The UTF-8\n * mapping logic is as follows [2]:\n *\n * • If a code point is in the range U+0000..U+007F, then view it as a 7-bit\n * integer: 0bxxxxxxx. Map the code point to 1 byte with the first high order\n * bit set to 0:\n *\n * b1=0b0xxxxxxx\n *\n * • If a code point is in the range U+0080..U+07FF, then view it as an 11-bit\n * integer: 0byyyyyxxxxxx. Map the code point to 2 bytes with the first 5 bits\n * of the code point stored in the first byte, and the last 6 bits stored in\n * the second byte:\n *\n * b1=0b110yyyyy b2=0b10xxxxxx\n *\n * • If a code point is in the range U+0800..U+FFFF, then view it as a 16-bit\n * integer, 0bzzzzyyyyyyxxxxxx. Map the code point to 3 bytes with the first\n * 4 bits stored in the first byte, the next 6 bits stored in the second byte,\n * and the last 6 bits in the third byte:\n *\n * b1=0b1110zzzz b2=0b10yyyyyy b3=0b10xxxxxx\n *\n * • If a code point is in the range U+10000...U+10FFFF, then view it as a\n * 21-bit integer, 0bvvvzzzzzzyyyyyyxxxxxx. 
Map the code point to 4 bytes with\n * the first 3 bits stored in the first byte, the next 6 bits stored in the\n * second byte, the next 6 bits stored in the third byte, and the last 6 bits\n * stored in the fourth byte:\n *\n * b1=0b11110xxx b2=0b10zzzzzz b3=0b10yyyyyy b4=0b10xxxxxx\n *\n * -----------------------------------------------------------------------------\n *\n * It is important to note, when iterating through the code points of a string\n * in JavaScript, that if a character is encoded as a surrogate pair it will\n * increase the string's length by 2 instead of 1 [4]. For example:\n *\n * ```\n * > 'a'.length\n * 1\n * > '💩'.length\n * 2\n * > '語'.length\n * 1\n * > 'a💩語'.length\n * 4\n * ```\n *\n * The results of the above example are explained by the fact that the\n * characters 'a' and '語' are not represented by surrogate pairs, but '💩' is.\n *\n * Because of this idiosyncrasy in JavaScript's string implementation and APIs,\n * we must \"jump\" an extra index after encoding a character as a surrogate\n * pair. In practice, this means we must increment the index of our for loop by\n * 2 if we encode a surrogate pair, and 1 in all other cases.\n *\n * -----------------------------------------------------------------------------\n *\n * References:\n * - [1] https://www.unicode.org/versions/Unicode12.0.0/UnicodeStandard-12.0.pdf\n * 3.9 Unicode Encoding Forms - UTF-8\n * - [2] http://www.herongyang.com/Unicode/UTF-8-UTF-8-Encoding.html\n * - [3] http://www.herongyang.com/Unicode/UTF-8-UTF-8-Encoding-Algorithm.html\n * - [4] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/length#Description\n *\n */\nexport var utf8Encode = function (input, byteOrderMark) {\n if (byteOrderMark === void 0) { byteOrderMark = true; }\n var encoded = [];\n if (byteOrderMark)\n encoded.push(0xef, 0xbb, 0xbf);\n for (var idx = 0, len = input.length; idx < len;) {\n var codePoint = input.codePointAt(idx);\n // One byte encoding\n if (codePoint < 0x80) {\n var byte1 = codePoint & 0x7f;\n encoded.push(byte1);\n idx += 1;\n }\n // Two byte encoding\n else if (codePoint < 0x0800) {\n var byte1 = ((codePoint >> 6) & 0x1f) | 0xc0;\n var byte2 = (codePoint & 0x3f) | 0x80;\n encoded.push(byte1, byte2);\n idx += 1;\n }\n // Three byte encoding\n else if (codePoint < 0x010000) {\n var byte1 = ((codePoint >> 12) & 0x0f) | 0xe0;\n var byte2 = ((codePoint >> 6) & 0x3f) | 0x80;\n var byte3 = (codePoint & 0x3f) | 0x80;\n encoded.push(byte1, byte2, byte3);\n idx += 1;\n }\n // Four byte encoding (surrogate pair)\n else if (codePoint < 0x110000) {\n var byte1 = ((codePoint >> 18) & 0x07) | 0xf0;\n var byte2 = ((codePoint >> 12) & 0x3f) | 0x80;\n var byte3 = ((codePoint >> 6) & 0x3f) | 0x80;\n var byte4 = ((codePoint >> 0) & 0x3f) | 0x80;\n encoded.push(byte1, byte2, byte3, byte4);\n idx += 2;\n }\n // Should never reach this case\n else\n throw new Error(\"Invalid code point: 0x\" + toHexString(codePoint));\n }\n return new Uint8Array(encoded);\n};\n/**\n * Encodes a string to UTF-16.\n *\n * @param input The string to be encoded.\n * @param byteOrderMark Whether or not a byte order marker (BOM) should be added\n * to the start of the encoding. (default `true`)\n * @returns A Uint16Array containing the UTF-16 encoding of the input string.\n *\n * -----------------------------------------------------------------------------\n *\n * JavaScript strings are composed of Unicode code points. Code points are\n * integers in the range 0 to 1,114,111 (0x10FFFF). 
When serializing a string,\n * it must be encoded as a sequence of words. A word is typically 8, 16, or 32\n * bytes in size. As such, Unicode defines three encoding forms: UTF-8, UTF-16,\n * and UTF-32. These encoding forms are described in the Unicode standard [1].\n * This function implements the UTF-16 encoding form.\n *\n * -----------------------------------------------------------------------------\n *\n * In UTF-16, each code point is mapped to one or two 16-bit integers. The\n * UTF-16 mapping logic is as follows [2]:\n *\n * • If a code point is in the range U+0000..U+FFFF, then map the code point to\n * a 16-bit integer with the most significant byte first.\n *\n * • If a code point is in the range U+10000..U+10000, then map the code point\n * to two 16-bit integers. The first integer should contain the high surrogate\n * and the second integer should contain the low surrogate. Both surrogates\n * should be written with the most significant byte first.\n *\n * -----------------------------------------------------------------------------\n *\n * It is important to note, when iterating through the code points of a string\n * in JavaScript, that if a character is encoded as a surrogate pair it will\n * increase the string's length by 2 instead of 1 [4]. For example:\n *\n * ```\n * > 'a'.length\n * 1\n * > '💩'.length\n * 2\n * > '語'.length\n * 1\n * > 'a💩語'.length\n * 4\n * ```\n *\n * The results of the above example are explained by the fact that the\n * characters 'a' and '語' are not represented by surrogate pairs, but '💩' is.\n *\n * Because of this idiosyncrasy in JavaScript's string implementation and APIs,\n * we must \"jump\" an extra index after encoding a character as a surrogate\n * pair. In practice, this means we must increment the index of our for loop by\n * 2 if we encode a surrogate pair, and 1 in all other cases.\n *\n * -----------------------------------------------------------------------------\n *\n * References:\n * - [1] https://www.unicode.org/versions/Unicode12.0.0/UnicodeStandard-12.0.pdf\n * 3.9 Unicode Encoding Forms - UTF-8\n * - [2] http://www.herongyang.com/Unicode/UTF-16-UTF-16-Encoding.html\n * - [3] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/length#Description\n *\n */\nexport var utf16Encode = function (input, byteOrderMark) {\n if (byteOrderMark === void 0) { byteOrderMark = true; }\n var encoded = [];\n if (byteOrderMark)\n encoded.push(0xfeff);\n for (var idx = 0, len = input.length; idx < len;) {\n var codePoint = input.codePointAt(idx);\n // Two byte encoding\n if (codePoint < 0x010000) {\n encoded.push(codePoint);\n idx += 1;\n }\n // Four byte encoding (surrogate pair)\n else if (codePoint < 0x110000) {\n encoded.push(highSurrogate(codePoint), lowSurrogate(codePoint));\n idx += 2;\n }\n // Should never reach this case\n else\n throw new Error(\"Invalid code point: 0x\" + toHexString(codePoint));\n }\n return new Uint16Array(encoded);\n};\n/**\n * Returns `true` if the `codePoint` is within the\n * Basic Multilingual Plane (BMP). 
Code points inside the BMP are not encoded\n * with surrogate pairs.\n * @param codePoint The code point to be evaluated.\n *\n * Reference: https://en.wikipedia.org/wiki/UTF-16#Description\n */\nexport var isWithinBMP = function (codePoint) {\n return codePoint >= 0 && codePoint <= 0xffff;\n};\n/**\n * Returns `true` if the given `codePoint` is valid and must be represented\n * with a surrogate pair when encoded.\n * @param codePoint The code point to be evaluated.\n *\n * Reference: https://en.wikipedia.org/wiki/UTF-16#Description\n */\nexport var hasSurrogates = function (codePoint) {\n return codePoint >= 0x010000 && codePoint <= 0x10ffff;\n};\n// From Unicode 3.0 spec, section 3.7:\n// http://unicode.org/versions/Unicode3.0.0/ch03.pdf\nexport var highSurrogate = function (codePoint) {\n return Math.floor((codePoint - 0x10000) / 0x400) + 0xd800;\n};\n// From Unicode 3.0 spec, section 3.7:\n// http://unicode.org/versions/Unicode3.0.0/ch03.pdf\nexport var lowSurrogate = function (codePoint) {\n return ((codePoint - 0x10000) % 0x400) + 0xdc00;\n};\nvar ByteOrder;\n(function (ByteOrder) {\n ByteOrder[\"BigEndian\"] = \"BigEndian\";\n ByteOrder[\"LittleEndian\"] = \"LittleEndian\";\n})(ByteOrder || (ByteOrder = {}));\nvar REPLACEMENT = '�'.codePointAt(0);\n/**\n * Decodes a Uint8Array of data to a string using UTF-16.\n *\n * Note that this function attempts to recover from erronous input by\n * inserting the replacement character (�) to mark invalid code points\n * and surrogate pairs.\n *\n * @param input A Uint8Array containing UTF-16 encoded data\n * @param byteOrderMark Whether or not a byte order marker (BOM) should be read\n * at the start of the encoding. (default `true`)\n * @returns The decoded string.\n */\nexport var utf16Decode = function (input, byteOrderMark) {\n if (byteOrderMark === void 0) { byteOrderMark = true; }\n // Need at least 2 bytes of data in UTF-16 encodings\n if (input.length <= 1)\n return String.fromCodePoint(REPLACEMENT);\n var byteOrder = byteOrderMark ? readBOM(input) : ByteOrder.BigEndian;\n // Skip byte order mark if needed\n var idx = byteOrderMark ? 
2 : 0;\n var codePoints = [];\n while (input.length - idx >= 2) {\n var first = decodeValues(input[idx++], input[idx++], byteOrder);\n if (isHighSurrogate(first)) {\n if (input.length - idx < 2) {\n // Need at least 2 bytes left for the low surrogate that is required\n codePoints.push(REPLACEMENT);\n }\n else {\n var second = decodeValues(input[idx++], input[idx++], byteOrder);\n if (isLowSurrogate(second)) {\n codePoints.push(first, second);\n }\n else {\n // Low surrogates should always follow high surrogates\n codePoints.push(REPLACEMENT);\n }\n }\n }\n else if (isLowSurrogate(first)) {\n // High surrogates should always come first since `decodeValues()`\n // accounts for the byte ordering\n idx += 2;\n codePoints.push(REPLACEMENT);\n }\n else {\n codePoints.push(first);\n }\n }\n // There shouldn't be extra byte(s) left over\n if (idx < input.length)\n codePoints.push(REPLACEMENT);\n return String.fromCodePoint.apply(String, codePoints);\n};\n/**\n * Returns `true` if the given `codePoint` is a high surrogate.\n * @param codePoint The code point to be evaluated.\n *\n * Reference: https://en.wikipedia.org/wiki/UTF-16#Description\n */\nvar isHighSurrogate = function (codePoint) {\n return codePoint >= 0xd800 && codePoint <= 0xdbff;\n};\n/**\n * Returns `true` if the given `codePoint` is a low surrogate.\n * @param codePoint The code point to be evaluated.\n *\n * Reference: https://en.wikipedia.org/wiki/UTF-16#Description\n */\nvar isLowSurrogate = function (codePoint) {\n return codePoint >= 0xdc00 && codePoint <= 0xdfff;\n};\n/**\n * Decodes the given utf-16 values first and second using the specified\n * byte order.\n * @param first The first byte of the encoding.\n * @param second The second byte of the encoding.\n * @param byteOrder The byte order of the encoding.\n * Reference: https://en.wikipedia.org/wiki/UTF-16#Examples\n */\nvar decodeValues = function (first, second, byteOrder) {\n // Append the binary representation of the preceding byte by shifting the\n // first one 8 to the left and than applying a bitwise or-operator to append\n // the second one.\n if (byteOrder === ByteOrder.LittleEndian)\n return (second << 8) | first;\n if (byteOrder === ByteOrder.BigEndian)\n return (first << 8) | second;\n throw new Error(\"Invalid byteOrder: \" + byteOrder);\n};\n/**\n * Returns whether the given array contains a byte order mark for the\n * UTF-16BE or UTF-16LE encoding. If it has neither, BigEndian is assumed.\n *\n * Reference: https://en.wikipedia.org/wiki/Byte_order_mark#UTF-16\n *\n * @param bytes The byte array to be evaluated.\n */\n// prettier-ignore\nvar readBOM = function (bytes) { return (hasUtf16BigEndianBOM(bytes) ? ByteOrder.BigEndian\n : hasUtf16LittleEndianBOM(bytes) ? 
ByteOrder.LittleEndian\n : ByteOrder.BigEndian); };\nvar hasUtf16BigEndianBOM = function (bytes) {\n return bytes[0] === 0xfe && bytes[1] === 0xff;\n};\nvar hasUtf16LittleEndianBOM = function (bytes) {\n return bytes[0] === 0xff && bytes[1] === 0xfe;\n};\nexport var hasUtf16BOM = function (bytes) {\n return hasUtf16BigEndianBOM(bytes) || hasUtf16LittleEndianBOM(bytes);\n};\n//# sourceMappingURL=unicode.js.map","export var toCharCode = function (character) { return character.charCodeAt(0); };\nexport var toCodePoint = function (character) { return character.codePointAt(0); };\nexport var toHexStringOfMinLength = function (num, minLength) {\n return padStart(num.toString(16), minLength, '0').toUpperCase();\n};\nexport var toHexString = function (num) { return toHexStringOfMinLength(num, 2); };\nexport var charFromCode = function (code) { return String.fromCharCode(code); };\nexport var charFromHexCode = function (hex) { return charFromCode(parseInt(hex, 16)); };\nexport var padStart = function (value, length, padChar) {\n var padding = '';\n for (var idx = 0, len = length - value.length; idx < len; idx++) {\n padding += padChar;\n }\n return padding + value;\n};\nexport var copyStringIntoBuffer = function (str, buffer, offset) {\n var length = str.length;\n for (var idx = 0; idx < length; idx++) {\n buffer[offset++] = str.charCodeAt(idx);\n }\n return length;\n};\nexport var addRandomSuffix = function (prefix, suffixLength) {\n if (suffixLength === void 0) { suffixLength = 4; }\n return prefix + \"-\" + Math.floor(Math.random() * Math.pow(10, suffixLength));\n};\nexport var escapeRegExp = function (str) {\n return str.replace(/[.*+?^${}()|[\\]\\\\]/g, '\\\\$&');\n};\nexport var cleanText = function (text) {\n return text.replace(/\\t|\\u0085|\\u2028|\\u2029/g, ' ').replace(/[\\b\\v]/g, '');\n};\nexport var escapedNewlineChars = ['\\\\n', '\\\\f', '\\\\r', '\\\\u000B'];\nexport var newlineChars = ['\\n', '\\f', '\\r', '\\u000B'];\nexport var isNewlineChar = function (text) { return /^[\\n\\f\\r\\u000B]$/.test(text); };\nexport var lineSplit = function (text) { return text.split(/[\\n\\f\\r\\u000B]/); };\nexport var mergeLines = function (text) {\n return text.replace(/[\\n\\f\\r\\u000B]/g, ' ');\n};\n// JavaScript's String.charAt() method doesn work on strings containing UTF-16\n// characters (with high and low surrogate pairs), such as 💩 (poo emoji). 
This\n// `charAtIndex()` function does.\n//\n// Credit: https://github.com/mathiasbynens/String.prototype.at/blob/master/at.js#L14-L48\nexport var charAtIndex = function (text, index) {\n // Get the first code unit and code unit value\n var cuFirst = text.charCodeAt(index);\n var cuSecond;\n var nextIndex = index + 1;\n var length = 1;\n if (\n // Check if it's the start of a surrogate pair.\n cuFirst >= 0xd800 &&\n cuFirst <= 0xdbff && // high surrogate\n text.length > nextIndex // there is a next code unit\n ) {\n cuSecond = text.charCodeAt(nextIndex);\n if (cuSecond >= 0xdc00 && cuSecond <= 0xdfff)\n length = 2; // low surrogate\n }\n return [text.slice(index, index + length), length];\n};\nexport var charSplit = function (text) {\n var chars = [];\n for (var idx = 0, len = text.length; idx < len;) {\n var _a = charAtIndex(text, idx), c = _a[0], cLen = _a[1];\n chars.push(c);\n idx += cLen;\n }\n return chars;\n};\nvar buildWordBreakRegex = function (wordBreaks) {\n var newlineCharUnion = escapedNewlineChars.join('|');\n var escapedRules = ['$'];\n for (var idx = 0, len = wordBreaks.length; idx < len; idx++) {\n var wordBreak = wordBreaks[idx];\n if (isNewlineChar(wordBreak)) {\n throw new TypeError(\"`wordBreak` must not include \" + newlineCharUnion);\n }\n escapedRules.push(wordBreak === '' ? '.' : escapeRegExp(wordBreak));\n }\n var breakRules = escapedRules.join('|');\n return new RegExp(\"(\" + newlineCharUnion + \")|((.*?)(\" + breakRules + \"))\", 'gm');\n};\nexport var breakTextIntoLines = function (text, wordBreaks, maxWidth, computeWidthOfText) {\n var regex = buildWordBreakRegex(wordBreaks);\n var words = cleanText(text).match(regex);\n var currLine = '';\n var currWidth = 0;\n var lines = [];\n var pushCurrLine = function () {\n if (currLine !== '')\n lines.push(currLine);\n currLine = '';\n currWidth = 0;\n };\n for (var idx = 0, len = words.length; idx < len; idx++) {\n var word = words[idx];\n if (isNewlineChar(word)) {\n pushCurrLine();\n }\n else {\n var width = computeWidthOfText(word);\n if (currWidth + width > maxWidth)\n pushCurrLine();\n currLine += word;\n currWidth += width;\n }\n }\n pushCurrLine();\n return lines;\n};\n// See section \"7.9.4 Dates\" of the PDF specification\nvar dateRegex = /^D:(\\d\\d\\d\\d)(\\d\\d)?(\\d\\d)?(\\d\\d)?(\\d\\d)?(\\d\\d)?([+\\-Z])?(\\d\\d)?'?(\\d\\d)?'?$/;\nexport var parseDate = function (dateStr) {\n var match = dateStr.match(dateRegex);\n if (!match)\n return undefined;\n var year = match[1], _a = match[2], month = _a === void 0 ? '01' : _a, _b = match[3], day = _b === void 0 ? '01' : _b, _c = match[4], hours = _c === void 0 ? '00' : _c, _d = match[5], mins = _d === void 0 ? '00' : _d, _e = match[6], secs = _e === void 0 ? '00' : _e, _f = match[7], offsetSign = _f === void 0 ? 'Z' : _f, _g = match[8], offsetHours = _g === void 0 ? '00' : _g, _h = match[9], offsetMins = _h === void 0 ? '00' : _h;\n // http://www.ecma-international.org/ecma-262/5.1/#sec-15.9.1.15\n var tzOffset = offsetSign === 'Z' ? 'Z' : \"\" + offsetSign + offsetHours + \":\" + offsetMins;\n var date = new Date(year + \"-\" + month + \"-\" + day + \"T\" + hours + \":\" + mins + \":\" + secs + tzOffset);\n return date;\n};\nexport var findLastMatch = function (value, regex) {\n var _a;\n var position = 0;\n var lastMatch;\n while (position < value.length) {\n var match = value.substring(position).match(regex);\n if (!match)\n return { match: lastMatch, pos: position };\n lastMatch = match;\n position += ((_a = match.index) !== null && _a !== void 0 ? 
_a : 0) + match[0].length;\n }\n return { match: lastMatch, pos: position };\n};\n//# sourceMappingURL=strings.js.map","import { decodeFromBase64DataUri } from \"./base64\";\nimport { charFromCode } from \"./strings\";\nexport var last = function (array) { return array[array.length - 1]; };\n// export const dropLast = (array: T[]): T[] =>\n// array.slice(0, array.length - 1);\nexport var typedArrayFor = function (value) {\n if (value instanceof Uint8Array)\n return value;\n var length = value.length;\n var typedArray = new Uint8Array(length);\n for (var idx = 0; idx < length; idx++) {\n typedArray[idx] = value.charCodeAt(idx);\n }\n return typedArray;\n};\nexport var mergeIntoTypedArray = function () {\n var arrays = [];\n for (var _i = 0; _i < arguments.length; _i++) {\n arrays[_i] = arguments[_i];\n }\n var arrayCount = arrays.length;\n var typedArrays = [];\n for (var idx = 0; idx < arrayCount; idx++) {\n var element = arrays[idx];\n typedArrays[idx] =\n element instanceof Uint8Array ? element : typedArrayFor(element);\n }\n var totalSize = 0;\n for (var idx = 0; idx < arrayCount; idx++) {\n totalSize += arrays[idx].length;\n }\n var merged = new Uint8Array(totalSize);\n var offset = 0;\n for (var arrIdx = 0; arrIdx < arrayCount; arrIdx++) {\n var arr = typedArrays[arrIdx];\n for (var byteIdx = 0, arrLen = arr.length; byteIdx < arrLen; byteIdx++) {\n merged[offset++] = arr[byteIdx];\n }\n }\n return merged;\n};\nexport var mergeUint8Arrays = function (arrays) {\n var totalSize = 0;\n for (var idx = 0, len = arrays.length; idx < len; idx++) {\n totalSize += arrays[idx].length;\n }\n var mergedBuffer = new Uint8Array(totalSize);\n var offset = 0;\n for (var idx = 0, len = arrays.length; idx < len; idx++) {\n var array = arrays[idx];\n mergedBuffer.set(array, offset);\n offset += array.length;\n }\n return mergedBuffer;\n};\nexport var arrayAsString = function (array) {\n var str = '';\n for (var idx = 0, len = array.length; idx < len; idx++) {\n str += charFromCode(array[idx]);\n }\n return str;\n};\nexport var byAscendingId = function (a, b) { return a.id - b.id; };\nexport var sortedUniq = function (array, indexer) {\n var uniq = [];\n for (var idx = 0, len = array.length; idx < len; idx++) {\n var curr = array[idx];\n var prev = array[idx - 1];\n if (idx === 0 || indexer(curr) !== indexer(prev)) {\n uniq.push(curr);\n }\n }\n return uniq;\n};\n// Arrays and TypedArrays in JS both have .reverse() methods, which would seem\n// to negate the need for this function. However, not all runtimes support this\n// method (e.g. React Native). 
This function compensates for that fact.\nexport var reverseArray = function (array) {\n var arrayLen = array.length;\n for (var idx = 0, len = Math.floor(arrayLen / 2); idx < len; idx++) {\n var leftIdx = idx;\n var rightIdx = arrayLen - idx - 1;\n var temp = array[idx];\n array[leftIdx] = array[rightIdx];\n array[rightIdx] = temp;\n }\n return array;\n};\nexport var sum = function (array) {\n var total = 0;\n for (var idx = 0, len = array.length; idx < len; idx++) {\n total += array[idx];\n }\n return total;\n};\nexport var range = function (start, end) {\n var arr = new Array(end - start);\n for (var idx = 0, len = arr.length; idx < len; idx++) {\n arr[idx] = start + idx;\n }\n return arr;\n};\nexport var pluckIndices = function (arr, indices) {\n var plucked = new Array(indices.length);\n for (var idx = 0, len = indices.length; idx < len; idx++) {\n plucked[idx] = arr[indices[idx]];\n }\n return plucked;\n};\nexport var canBeConvertedToUint8Array = function (input) {\n return input instanceof Uint8Array ||\n input instanceof ArrayBuffer ||\n typeof input === 'string';\n};\nexport var toUint8Array = function (input) {\n if (typeof input === 'string') {\n return decodeFromBase64DataUri(input);\n }\n else if (input instanceof ArrayBuffer) {\n return new Uint8Array(input);\n }\n else if (input instanceof Uint8Array) {\n return input;\n }\n else {\n throw new TypeError('`input` must be one of `string | ArrayBuffer | Uint8Array`');\n }\n};\n//# sourceMappingURL=arrays.js.map","/**\n * Returns a Promise that resolves after at least one tick of the\n * Macro Task Queue occurs.\n */\nexport var waitForTick = function () {\n return new Promise(function (resolve) {\n setTimeout(function () { return resolve(); }, 0);\n });\n};\n//# sourceMappingURL=async.js.map","// tslint:disable radix\n/**\n * Converts a number to its string representation in decimal. This function\n * differs from simply converting a number to a string with `.toString()`\n * because this function's output string will **not** contain exponential\n * notation.\n *\n * Credit: https://stackoverflow.com/a/46545519\n */\nexport var numberToString = function (num) {\n var numStr = String(num);\n if (Math.abs(num) < 1.0) {\n var e = parseInt(num.toString().split('e-')[1]);\n if (e) {\n var negative = num < 0;\n if (negative)\n num *= -1;\n num *= Math.pow(10, e - 1);\n numStr = '0.' + new Array(e).join('0') + num.toString().substring(2);\n if (negative)\n numStr = '-' + numStr;\n }\n }\n else {\n var e = parseInt(num.toString().split('+')[1]);\n if (e > 20) {\n e -= 20;\n num /= Math.pow(10, e);\n numStr = num.toString() + new Array(e + 1).join('0');\n }\n }\n return numStr;\n};\nexport var sizeInBytes = function (n) { return Math.ceil(n.toString(2).length / 8); };\n/**\n * Converts a number into its constituent bytes and returns them as\n * a number[].\n *\n * Returns most significant byte as first element in array. 
It may be necessary\n * to call .reverse() to get the bits in the desired order.\n *\n * Example:\n * bytesFor(0x02A41E) => [ 0b10, 0b10100100, 0b11110 ]\n *\n * Credit for algorithm: https://stackoverflow.com/a/1936865\n */\nexport var bytesFor = function (n) {\n var bytes = new Uint8Array(sizeInBytes(n));\n for (var i = 1; i <= bytes.length; i++) {\n bytes[i - 1] = n >> ((bytes.length - i) * 8);\n }\n return bytes;\n};\n//# sourceMappingURL=numbers.js.map","export var error = function (msg) {\n throw new Error(msg);\n};\n//# sourceMappingURL=errors.js.map","'use strict';\n\n\nvar TYPED_OK = (typeof Uint8Array !== 'undefined') &&\n (typeof Uint16Array !== 'undefined') &&\n (typeof Int32Array !== 'undefined');\n\nfunction _has(obj, key) {\n return Object.prototype.hasOwnProperty.call(obj, key);\n}\n\nexports.assign = function (obj /*from1, from2, from3, ...*/) {\n var sources = Array.prototype.slice.call(arguments, 1);\n while (sources.length) {\n var source = sources.shift();\n if (!source) { continue; }\n\n if (typeof source !== 'object') {\n throw new TypeError(source + 'must be non-object');\n }\n\n for (var p in source) {\n if (_has(source, p)) {\n obj[p] = source[p];\n }\n }\n }\n\n return obj;\n};\n\n\n// reduce buffer size, avoiding mem copy\nexports.shrinkBuf = function (buf, size) {\n if (buf.length === size) { return buf; }\n if (buf.subarray) { return buf.subarray(0, size); }\n buf.length = size;\n return buf;\n};\n\n\nvar fnTyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n if (src.subarray && dest.subarray) {\n dest.set(src.subarray(src_offs, src_offs + len), dest_offs);\n return;\n }\n // Fallback to ordinary array\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n var i, l, len, pos, chunk, result;\n\n // calculate data length\n len = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n len += chunks[i].length;\n }\n\n // join chunks\n result = new Uint8Array(len);\n pos = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n chunk = chunks[i];\n result.set(chunk, pos);\n pos += chunk.length;\n }\n\n return result;\n }\n};\n\nvar fnUntyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n return [].concat.apply([], chunks);\n }\n};\n\n\n// Enable/Disable typed arrays use, for testing\n//\nexports.setTyped = function (on) {\n if (on) {\n exports.Buf8 = Uint8Array;\n exports.Buf16 = Uint16Array;\n exports.Buf32 = Int32Array;\n exports.assign(exports, fnTyped);\n } else {\n exports.Buf8 = Array;\n exports.Buf16 = Array;\n exports.Buf32 = Array;\n exports.assign(exports, fnUntyped);\n }\n};\n\nexports.setTyped(TYPED_OK);\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. 
If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n/* eslint-disable space-unary-ops */\n\nvar utils = require('../utils/common');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n//var Z_FILTERED = 1;\n//var Z_HUFFMAN_ONLY = 2;\n//var Z_RLE = 3;\nvar Z_FIXED = 4;\n//var Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\nvar Z_BINARY = 0;\nvar Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n/*============================================================================*/\n\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n// From zutil.h\n\nvar STORED_BLOCK = 0;\nvar STATIC_TREES = 1;\nvar DYN_TREES = 2;\n/* The three kinds of block type */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\n/* The minimum and maximum match lengths */\n\n// From deflate.h\n/* ===========================================================================\n * Internal compression state.\n */\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\n\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\n\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\n\nvar D_CODES = 30;\n/* number of distance codes */\n\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\n\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\n\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar Buf_size = 16;\n/* size of bit buffer in bi_buf */\n\n\n/* ===========================================================================\n * Constants\n */\n\nvar MAX_BL_BITS = 7;\n/* Bit length codes must not exceed MAX_BL_BITS bits */\n\nvar END_BLOCK = 256;\n/* end of block literal code */\n\nvar REP_3_6 = 16;\n/* repeat previous bit length 3-6 times (2 bits of repeat count) */\n\nvar REPZ_3_10 = 17;\n/* repeat a zero length 3-10 times (3 bits of repeat count) */\n\nvar REPZ_11_138 = 18;\n/* repeat a zero length 11-138 times (7 bits of repeat count) */\n\n/* eslint-disable comma-spacing,array-bracket-spacing */\nvar extra_lbits = /* extra bits for each length code */\n [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0];\n\nvar extra_dbits = /* extra bits for each distance code */\n [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13];\n\nvar extra_blbits = /* extra bits for each bit length code */\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7];\n\nvar bl_order =\n [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];\n/* eslint-enable comma-spacing,array-bracket-spacing */\n\n/* The lengths of the bit length codes are sent in order of decreasing\n * probability, to avoid transmitting the lengths for unused bit length codes.\n */\n\n/* ===========================================================================\n * Local data. These are initialized only once.\n */\n\n// We pre-fill arrays with 0 to avoid uninitialized gaps\n\nvar DIST_CODE_LEN = 512; /* see definition of array dist_code below */\n\n// !!!! 
Use flat array instead of structure, Freq = i*2, Len = i*2+1\nvar static_ltree = new Array((L_CODES + 2) * 2);\nzero(static_ltree);\n/* The static literal tree. Since the bit lengths are imposed, there is no\n * need for the L_CODES extra codes used during heap construction. However\n * The codes 286 and 287 are needed to build a canonical tree (see _tr_init\n * below).\n */\n\nvar static_dtree = new Array(D_CODES * 2);\nzero(static_dtree);\n/* The static distance tree. (Actually a trivial tree since all codes use\n * 5 bits.)\n */\n\nvar _dist_code = new Array(DIST_CODE_LEN);\nzero(_dist_code);\n/* Distance codes. The first 256 values correspond to the distances\n * 3 .. 258, the last 256 values correspond to the top 8 bits of\n * the 15 bit distances.\n */\n\nvar _length_code = new Array(MAX_MATCH - MIN_MATCH + 1);\nzero(_length_code);\n/* length code for each normalized match length (0 == MIN_MATCH) */\n\nvar base_length = new Array(LENGTH_CODES);\nzero(base_length);\n/* First normalized length for each code (0 = MIN_MATCH) */\n\nvar base_dist = new Array(D_CODES);\nzero(base_dist);\n/* First normalized distance for each code (0 = distance of 1) */\n\n\nfunction StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {\n\n this.static_tree = static_tree; /* static tree or NULL */\n this.extra_bits = extra_bits; /* extra bits for each code or NULL */\n this.extra_base = extra_base; /* base index for extra_bits */\n this.elems = elems; /* max number of elements in the tree */\n this.max_length = max_length; /* max bit length for the codes */\n\n // show if `static_tree` has data or dummy - needed for monomorphic objects\n this.has_stree = static_tree && static_tree.length;\n}\n\n\nvar static_l_desc;\nvar static_d_desc;\nvar static_bl_desc;\n\n\nfunction TreeDesc(dyn_tree, stat_desc) {\n this.dyn_tree = dyn_tree; /* the dynamic tree */\n this.max_code = 0; /* largest code with non zero frequency */\n this.stat_desc = stat_desc; /* the corresponding static tree */\n}\n\n\n\nfunction d_code(dist) {\n return dist < 256 ? 
_dist_code[dist] : _dist_code[256 + (dist >>> 7)];\n}\n\n\n/* ===========================================================================\n * Output a short LSB first on the stream.\n * IN assertion: there is enough room in pendingBuf.\n */\nfunction put_short(s, w) {\n// put_byte(s, (uch)((w) & 0xff));\n// put_byte(s, (uch)((ush)(w) >> 8));\n s.pending_buf[s.pending++] = (w) & 0xff;\n s.pending_buf[s.pending++] = (w >>> 8) & 0xff;\n}\n\n\n/* ===========================================================================\n * Send a value on a given number of bits.\n * IN assertion: length <= 16 and value fits in length bits.\n */\nfunction send_bits(s, value, length) {\n if (s.bi_valid > (Buf_size - length)) {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n put_short(s, s.bi_buf);\n s.bi_buf = value >> (Buf_size - s.bi_valid);\n s.bi_valid += length - Buf_size;\n } else {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n s.bi_valid += length;\n }\n}\n\n\nfunction send_code(s, c, tree) {\n send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);\n}\n\n\n/* ===========================================================================\n * Reverse the first len bits of a code, using straightforward code (a faster\n * method would use a table)\n * IN assertion: 1 <= len <= 15\n */\nfunction bi_reverse(code, len) {\n var res = 0;\n do {\n res |= code & 1;\n code >>>= 1;\n res <<= 1;\n } while (--len > 0);\n return res >>> 1;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer, keeping at most 7 bits in it.\n */\nfunction bi_flush(s) {\n if (s.bi_valid === 16) {\n put_short(s, s.bi_buf);\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n } else if (s.bi_valid >= 8) {\n s.pending_buf[s.pending++] = s.bi_buf & 0xff;\n s.bi_buf >>= 8;\n s.bi_valid -= 8;\n }\n}\n\n\n/* ===========================================================================\n * Compute the optimal bit lengths for a tree and update the total bit length\n * for the current block.\n * IN assertion: the fields freq and dad are set, heap[heap_max] and\n * above are the tree nodes sorted by increasing frequency.\n * OUT assertions: the field len is set to the optimal bit length, the\n * array bl_count contains the frequencies for each bit length.\n * The length opt_len is updated; static_len is also updated if stree is\n * not null.\n */\nfunction gen_bitlen(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var max_code = desc.max_code;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var extra = desc.stat_desc.extra_bits;\n var base = desc.stat_desc.extra_base;\n var max_length = desc.stat_desc.max_length;\n var h; /* heap index */\n var n, m; /* iterate over the tree elements */\n var bits; /* bit length */\n var xbits; /* extra bits */\n var f; /* frequency */\n var overflow = 0; /* number of elements with bit length too large */\n\n for (bits = 0; bits <= MAX_BITS; bits++) {\n s.bl_count[bits] = 0;\n }\n\n /* In a first pass, compute the optimal bit lengths (which may\n * overflow in the case of the bit length tree).\n */\n tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */\n\n for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {\n n = s.heap[h];\n bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;\n if (bits > max_length) {\n bits = max_length;\n overflow++;\n }\n tree[n * 2 + 1]/*.Len*/ = bits;\n /* We overwrite tree[n].Dad which is no longer needed */\n\n if (n > 
max_code) { continue; } /* not a leaf node */\n\n s.bl_count[bits]++;\n xbits = 0;\n if (n >= base) {\n xbits = extra[n - base];\n }\n f = tree[n * 2]/*.Freq*/;\n s.opt_len += f * (bits + xbits);\n if (has_stree) {\n s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);\n }\n }\n if (overflow === 0) { return; }\n\n // Trace((stderr,\"\\nbit length overflow\\n\"));\n /* This happens for example on obj2 and pic of the Calgary corpus */\n\n /* Find the first bit length which could increase: */\n do {\n bits = max_length - 1;\n while (s.bl_count[bits] === 0) { bits--; }\n s.bl_count[bits]--; /* move one leaf down the tree */\n s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */\n s.bl_count[max_length]--;\n /* The brother of the overflow item also moves one step up,\n * but this does not affect bl_count[max_length]\n */\n overflow -= 2;\n } while (overflow > 0);\n\n /* Now recompute all bit lengths, scanning in increasing frequency.\n * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all\n * lengths instead of fixing only the wrong ones. This idea is taken\n * from 'ar' written by Haruhiko Okumura.)\n */\n for (bits = max_length; bits !== 0; bits--) {\n n = s.bl_count[bits];\n while (n !== 0) {\n m = s.heap[--h];\n if (m > max_code) { continue; }\n if (tree[m * 2 + 1]/*.Len*/ !== bits) {\n // Trace((stderr,\"code %d bits %d->%d\\n\", m, tree[m].Len, bits));\n s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;\n tree[m * 2 + 1]/*.Len*/ = bits;\n }\n n--;\n }\n }\n}\n\n\n/* ===========================================================================\n * Generate the codes for a given tree and bit counts (which need not be\n * optimal).\n * IN assertion: the array bl_count contains the bit length statistics for\n * the given tree and the field len is set for all tree elements.\n * OUT assertion: the field code is set for all tree elements of non\n * zero code length.\n */\nfunction gen_codes(tree, max_code, bl_count)\n// ct_data *tree; /* the tree to decorate */\n// int max_code; /* largest code with non zero frequency */\n// ushf *bl_count; /* number of codes at each bit length */\n{\n var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */\n var code = 0; /* running code value */\n var bits; /* bit index */\n var n; /* code index */\n\n /* The distribution counts are first used to generate the code values\n * without bit reversal.\n */\n for (bits = 1; bits <= MAX_BITS; bits++) {\n next_code[bits] = code = (code + bl_count[bits - 1]) << 1;\n }\n /* Check that the bit counts in bl_count are consistent. 
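// --- Illustrative sketch, not part of the library code above ---------------
// A self-contained rework of the canonical-code step that gen_codes()
// performs: derive next_code[] from the per-length counts, then hand out
// LSB-first (bit-reversed) codes. All names here are local to this example.
function exampleCanonicalCodes(lens) { // bit length per symbol, 0 = unused
  var MAXB = 15;
  var blCount = new Array(MAXB + 1);
  var nextCode = new Array(MAXB + 1);
  var b, c = 0;
  for (b = 0; b <= MAXB; b++) { blCount[b] = 0; }
  for (var i = 0; i < lens.length; i++) { if (lens[i]) { blCount[lens[i]]++; } }
  for (b = 1; b <= MAXB; b++) { nextCode[b] = c = (c + blCount[b - 1]) << 1; }

  // reverse `len` bits, as bi_reverse() above does, since codes go out LSB first
  function rev(code, len) {
    var r = 0;
    do { r |= code & 1; code >>>= 1; r <<= 1; } while (--len > 0);
    return r >>> 1;
  }

  return lens.map(function (len) { return len ? rev(nextCode[len]++, len) : 0; });
}
// exampleCanonicalCodes([2, 1, 3, 3]) -> [1, 0, 3, 7]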
The last code\n * must be all ones.\n */\n //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,\n // \"inconsistent bit counts\");\n //Tracev((stderr,\"\\ngen_codes: max_code %d \", max_code));\n\n for (n = 0; n <= max_code; n++) {\n var len = tree[n * 2 + 1]/*.Len*/;\n if (len === 0) { continue; }\n /* Now reverse the bits */\n tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len);\n\n //Tracecv(tree != static_ltree, (stderr,\"\\nn %3d %c l %2d c %4x (%x) \",\n // n, (isgraph(n) ? n : ' '), len, tree[n*2]/*.Code*/, next_code[len]-1));\n }\n}\n\n\n/* ===========================================================================\n * Initialize the various 'constant' tables.\n */\nfunction tr_static_init() {\n var n; /* iterates over tree elements */\n var bits; /* bit counter */\n var length; /* length value */\n var code; /* code value */\n var dist; /* distance index */\n var bl_count = new Array(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n // do check in _tr_init()\n //if (static_init_done) return;\n\n /* For some embedded targets, global variables are not initialized: */\n/*#ifdef NO_INIT_GLOBAL_POINTERS\n static_l_desc.static_tree = static_ltree;\n static_l_desc.extra_bits = extra_lbits;\n static_d_desc.static_tree = static_dtree;\n static_d_desc.extra_bits = extra_dbits;\n static_bl_desc.extra_bits = extra_blbits;\n#endif*/\n\n /* Initialize the mapping length (0..255) -> length code (0..28) */\n length = 0;\n for (code = 0; code < LENGTH_CODES - 1; code++) {\n base_length[code] = length;\n for (n = 0; n < (1 << extra_lbits[code]); n++) {\n _length_code[length++] = code;\n }\n }\n //Assert (length == 256, \"tr_static_init: length != 256\");\n /* Note that the length 255 (match length 258) can be represented\n * in two different ways: code 284 + 5 bits or code 285, so we\n * overwrite length_code[255] to use the best encoding:\n */\n _length_code[length - 1] = code;\n\n /* Initialize the mapping dist (0..32K) -> dist code (0..29) */\n dist = 0;\n for (code = 0; code < 16; code++) {\n base_dist[code] = dist;\n for (n = 0; n < (1 << extra_dbits[code]); n++) {\n _dist_code[dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: dist != 256\");\n dist >>= 7; /* from now on, all distances are divided by 128 */\n for (; code < D_CODES; code++) {\n base_dist[code] = dist << 7;\n for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {\n _dist_code[256 + dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: 256+dist != 512\");\n\n /* Construct the codes of the static literal tree */\n for (bits = 0; bits <= MAX_BITS; bits++) {\n bl_count[bits] = 0;\n }\n\n n = 0;\n while (n <= 143) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n while (n <= 255) {\n static_ltree[n * 2 + 1]/*.Len*/ = 9;\n n++;\n bl_count[9]++;\n }\n while (n <= 279) {\n static_ltree[n * 2 + 1]/*.Len*/ = 7;\n n++;\n bl_count[7]++;\n }\n while (n <= 287) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n /* Codes 286 and 287 do not exist, but we must include them in the\n * tree construction to get a canonical Huffman tree (longest code\n * all ones)\n */\n gen_codes(static_ltree, L_CODES + 1, bl_count);\n\n /* The static distance tree is trivial: */\n for (n = 0; n < D_CODES; n++) {\n static_dtree[n * 2 + 1]/*.Len*/ = 5;\n static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);\n }\n\n // Now data ready and we can init static trees\n static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);\n static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);\n static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);\n\n //static_init_done = true;\n}\n\n\n/* ===========================================================================\n * Initialize a new block.\n */\nfunction init_block(s) {\n var n; /* iterates over tree elements */\n\n /* Initialize the trees. 
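// --- Illustrative sketch, not part of the library code above ---------------
// The four while-loops in tr_static_init() pin down the fixed literal/length
// code lengths of the deflate format. The same table written as a lookup;
// exampleStaticLitLen is a hypothetical helper, not used by the code above.
function exampleStaticLitLen(symbol) {
  if (symbol <= 143) { return 8; }   // literals 0..143
  if (symbol <= 255) { return 9; }   // literals 144..255
  if (symbol <= 279) { return 7; }   // END_BLOCK (256) and short length codes
  return 8;                          // length codes 280..287
}
// exampleStaticLitLen(65) -> 8, exampleStaticLitLen(256) -> 7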
*/\n for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }\n\n s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;\n s.opt_len = s.static_len = 0;\n s.last_lit = s.matches = 0;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer and align the output on a byte boundary\n */\nfunction bi_windup(s)\n{\n if (s.bi_valid > 8) {\n put_short(s, s.bi_buf);\n } else if (s.bi_valid > 0) {\n //put_byte(s, (Byte)s->bi_buf);\n s.pending_buf[s.pending++] = s.bi_buf;\n }\n s.bi_buf = 0;\n s.bi_valid = 0;\n}\n\n/* ===========================================================================\n * Copy a stored block, storing first the length and its\n * one's complement if requested.\n */\nfunction copy_block(s, buf, len, header)\n//DeflateState *s;\n//charf *buf; /* the input data */\n//unsigned len; /* its length */\n//int header; /* true if block header must be written */\n{\n bi_windup(s); /* align on byte boundary */\n\n if (header) {\n put_short(s, len);\n put_short(s, ~len);\n }\n// while (len--) {\n// put_byte(s, *buf++);\n// }\n utils.arraySet(s.pending_buf, s.window, buf, len, s.pending);\n s.pending += len;\n}\n\n/* ===========================================================================\n * Compares to subtrees, using the tree depth as tie breaker when\n * the subtrees have equal frequency. This minimizes the worst case length.\n */\nfunction smaller(tree, n, m, depth) {\n var _n2 = n * 2;\n var _m2 = m * 2;\n return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||\n (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));\n}\n\n/* ===========================================================================\n * Restore the heap property by moving down the tree starting at node k,\n * exchanging a node with the smallest of its two sons if necessary, stopping\n * when the heap property is re-established (each father smaller than its\n * two sons).\n */\nfunction pqdownheap(s, tree, k)\n// deflate_state *s;\n// ct_data *tree; /* the tree to restore */\n// int k; /* node to move down */\n{\n var v = s.heap[k];\n var j = k << 1; /* left son of k */\n while (j <= s.heap_len) {\n /* Set j to the smallest of the two sons: */\n if (j < s.heap_len &&\n smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {\n j++;\n }\n /* Exit if v is smaller than both sons */\n if (smaller(tree, v, s.heap[j], s.depth)) { break; }\n\n /* Exchange v with the smallest son */\n s.heap[k] = s.heap[j];\n k = j;\n\n /* And continue down the tree, setting j to the left son of k */\n j <<= 1;\n }\n s.heap[k] = v;\n}\n\n\n// inlined manually\n// var SMALLEST = 1;\n\n/* ===========================================================================\n * Send the block data compressed using the given Huffman trees\n */\nfunction compress_block(s, ltree, dtree)\n// deflate_state *s;\n// const ct_data *ltree; /* literal tree */\n// const ct_data *dtree; /* distance tree */\n{\n var dist; /* distance of matched string */\n var lc; /* match length or unmatched char (if dist == 0) */\n var lx = 0; /* running index in l_buf */\n var code; /* the code to send */\n var extra; /* number of extra bits to send */\n\n if (s.last_lit !== 0) {\n do {\n dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);\n lc = s.pending_buf[s.l_buf + lx];\n lx++;\n\n if (dist === 0) {\n send_code(s, lc, ltree); /* send a literal byte 
*/\n //Tracecv(isgraph(lc), (stderr,\" '%c' \", lc));\n } else {\n /* Here, lc is the match length - MIN_MATCH */\n code = _length_code[lc];\n send_code(s, code + LITERALS + 1, ltree); /* send the length code */\n extra = extra_lbits[code];\n if (extra !== 0) {\n lc -= base_length[code];\n send_bits(s, lc, extra); /* send the extra length bits */\n }\n dist--; /* dist is now the match distance - 1 */\n code = d_code(dist);\n //Assert (code < D_CODES, \"bad d_code\");\n\n send_code(s, code, dtree); /* send the distance code */\n extra = extra_dbits[code];\n if (extra !== 0) {\n dist -= base_dist[code];\n send_bits(s, dist, extra); /* send the extra distance bits */\n }\n } /* literal or match pair ? */\n\n /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */\n //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,\n // \"pendingBuf overflow\");\n\n } while (lx < s.last_lit);\n }\n\n send_code(s, END_BLOCK, ltree);\n}\n\n\n/* ===========================================================================\n * Construct one Huffman tree and assigns the code bit strings and lengths.\n * Update the total bit length for the current block.\n * IN assertion: the field freq is set for all tree elements.\n * OUT assertions: the fields len and code are set to the optimal bit length\n * and corresponding code. The length opt_len is updated; static_len is\n * also updated if stree is not null. The field max_code is set.\n */\nfunction build_tree(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var elems = desc.stat_desc.elems;\n var n, m; /* iterate over heap elements */\n var max_code = -1; /* largest code with non zero frequency */\n var node; /* new node being created */\n\n /* Construct the initial heap, with least frequent element in\n * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].\n * heap[0] is not used.\n */\n s.heap_len = 0;\n s.heap_max = HEAP_SIZE;\n\n for (n = 0; n < elems; n++) {\n if (tree[n * 2]/*.Freq*/ !== 0) {\n s.heap[++s.heap_len] = max_code = n;\n s.depth[n] = 0;\n\n } else {\n tree[n * 2 + 1]/*.Len*/ = 0;\n }\n }\n\n /* The pkzip format requires that at least one distance code exists,\n * and that at least one bit should be sent even if there is only one\n * possible code. So to avoid special checks later on we force at least\n * two codes of non zero frequency.\n */\n while (s.heap_len < 2) {\n node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);\n tree[node * 2]/*.Freq*/ = 1;\n s.depth[node] = 0;\n s.opt_len--;\n\n if (has_stree) {\n s.static_len -= stree[node * 2 + 1]/*.Len*/;\n }\n /* node is 0 or 1 so it does not have extra bits */\n }\n desc.max_code = max_code;\n\n /* The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree,\n * establish sub-heaps of increasing lengths:\n */\n for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }\n\n /* Construct the Huffman tree by repeatedly combining the least two\n * frequent nodes.\n */\n node = elems; /* next internal node of the tree */\n do {\n //pqremove(s, tree, n); /* n = node of least frequency */\n /*** pqremove ***/\n n = s.heap[1/*SMALLEST*/];\n s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];\n pqdownheap(s, tree, 1/*SMALLEST*/);\n /***/\n\n m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */\n\n s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */\n s.heap[--s.heap_max] = m;\n\n /* Create a new node father of n and m */\n tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;\n s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;\n tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;\n\n /* and insert the new node in the heap */\n s.heap[1/*SMALLEST*/] = node++;\n pqdownheap(s, tree, 1/*SMALLEST*/);\n\n } while (s.heap_len >= 2);\n\n s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];\n\n /* At this point, the fields freq and dad are set. We can now\n * generate the bit lengths.\n */\n gen_bitlen(s, desc);\n\n /* The field len is now set, we can generate the bit codes */\n gen_codes(tree, max_code, s.bl_count);\n}\n\n\n/* ===========================================================================\n * Scan a literal or distance tree to determine the frequencies of the codes\n * in the bit length tree.\n */\nfunction scan_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n s.bl_tree[curlen * 2]/*.Freq*/ += count;\n\n } else if (curlen !== 0) {\n\n if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }\n s.bl_tree[REP_3_6 * 2]/*.Freq*/++;\n\n } else if (count <= 10) {\n s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;\n\n } else {\n s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;\n }\n\n count = 0;\n prevlen = curlen;\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Send a literal or distance tree in compressed form, using the codes in\n * bl_tree.\n */\nfunction send_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* 
repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n /* tree[max_code+1].Len = -1; */ /* guard already set */\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);\n\n } else if (curlen !== 0) {\n if (curlen !== prevlen) {\n send_code(s, curlen, s.bl_tree);\n count--;\n }\n //Assert(count >= 3 && count <= 6, \" 3_6?\");\n send_code(s, REP_3_6, s.bl_tree);\n send_bits(s, count - 3, 2);\n\n } else if (count <= 10) {\n send_code(s, REPZ_3_10, s.bl_tree);\n send_bits(s, count - 3, 3);\n\n } else {\n send_code(s, REPZ_11_138, s.bl_tree);\n send_bits(s, count - 11, 7);\n }\n\n count = 0;\n prevlen = curlen;\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Construct the Huffman tree for the bit lengths and return the index in\n * bl_order of the last bit length code to send.\n */\nfunction build_bl_tree(s) {\n var max_blindex; /* index of last bit length code of non zero freq */\n\n /* Determine the bit length frequencies for literal and distance trees */\n scan_tree(s, s.dyn_ltree, s.l_desc.max_code);\n scan_tree(s, s.dyn_dtree, s.d_desc.max_code);\n\n /* Build the bit length tree: */\n build_tree(s, s.bl_desc);\n /* opt_len now includes the length of the tree representations, except\n * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.\n */\n\n /* Determine the number of bit length codes to send. The pkzip format\n * requires that at least 4 bit length codes be sent. 
(appnote.txt says\n * 3 but the actual value used is 4.)\n */\n for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {\n if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {\n break;\n }\n }\n /* Update opt_len to include the bit length tree and counts */\n s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;\n //Tracev((stderr, \"\\ndyn trees: dyn %ld, stat %ld\",\n // s->opt_len, s->static_len));\n\n return max_blindex;\n}\n\n\n/* ===========================================================================\n * Send the header for a block using dynamic Huffman trees: the counts, the\n * lengths of the bit length codes, the literal tree and the distance tree.\n * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.\n */\nfunction send_all_trees(s, lcodes, dcodes, blcodes)\n// deflate_state *s;\n// int lcodes, dcodes, blcodes; /* number of codes for each tree */\n{\n var rank; /* index in bl_order */\n\n //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, \"not enough codes\");\n //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,\n // \"too many codes\");\n //Tracev((stderr, \"\\nbl counts: \"));\n send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */\n send_bits(s, dcodes - 1, 5);\n send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */\n for (rank = 0; rank < blcodes; rank++) {\n //Tracev((stderr, \"\\nbl code %2d \", bl_order[rank]));\n send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);\n }\n //Tracev((stderr, \"\\nbl tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */\n //Tracev((stderr, \"\\nlit tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */\n //Tracev((stderr, \"\\ndist tree: sent %ld\", s->bits_sent));\n}\n\n\n/* ===========================================================================\n * Check if the data type is TEXT or BINARY, using the following algorithm:\n * - TEXT if the two conditions below are satisfied:\n * a) There are no non-portable control characters belonging to the\n * \"black list\" (0..6, 14..25, 28..31).\n * b) There is at least one printable character belonging to the\n * \"white list\" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).\n * - BINARY otherwise.\n * - The following partially-portable control characters form a\n * \"gray list\" that is ignored in this detection algorithm:\n * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).\n * IN assertion: the fields Freq of dyn_ltree are set.\n */\nfunction detect_data_type(s) {\n /* black_mask is the bit mask of black-listed bytes\n * set bits 0..6, 14..25, and 28..31\n * 0xf3ffc07f = binary 11110011111111111100000001111111\n */\n var black_mask = 0xf3ffc07f;\n var n;\n\n /* Check for non-textual (\"black-listed\") bytes. */\n for (n = 0; n <= 31; n++, black_mask >>>= 1) {\n if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {\n return Z_BINARY;\n }\n }\n\n /* Check for textual (\"white-listed\") bytes. 
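// --- Illustrative sketch, not part of the library code above ---------------
// How the 0xf3ffc07f mask in detect_data_type() encodes the "black list":
// bit n is set when control byte n (0..31) marks the data as binary.
// exampleIsBlackListed is a hypothetical helper for illustration only.
function exampleIsBlackListed(byteValue) {
  if (byteValue > 31) { return false; }          // only control chars are listed
  return ((0xf3ffc07f >>> byteValue) & 1) === 1;
}
// exampleIsBlackListed(0)  -> true   (NUL is black-listed)
// exampleIsBlackListed(9)  -> false  (TAB is white-listed)
// exampleIsBlackListed(27) -> false  (ESC is only gray-listed)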
*/\n if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||\n s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n for (n = 32; n < LITERALS; n++) {\n if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n }\n\n /* There are no \"black-listed\" or \"white-listed\" bytes:\n * this stream either is empty or has tolerated (\"gray-listed\") bytes only.\n */\n return Z_BINARY;\n}\n\n\nvar static_init_done = false;\n\n/* ===========================================================================\n * Initialize the tree data structures for a new zlib stream.\n */\nfunction _tr_init(s)\n{\n\n if (!static_init_done) {\n tr_static_init();\n static_init_done = true;\n }\n\n s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);\n s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);\n s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);\n\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n /* Initialize the first block of the first file: */\n init_block(s);\n}\n\n\n/* ===========================================================================\n * Send a stored block\n */\nfunction _tr_stored_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */\n copy_block(s, buf, stored_len, true); /* with header */\n}\n\n\n/* ===========================================================================\n * Send one empty static block to give enough lookahead for inflate.\n * This takes 10 bits, of which 7 may remain in the bit buffer.\n */\nfunction _tr_align(s) {\n send_bits(s, STATIC_TREES << 1, 3);\n send_code(s, END_BLOCK, static_ltree);\n bi_flush(s);\n}\n\n\n/* ===========================================================================\n * Determine the best encoding for the current block: dynamic trees, static\n * trees or store, and output the encoded block to the zip file.\n */\nfunction _tr_flush_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block, or NULL if too old */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n var opt_lenb, static_lenb; /* opt_len and static_len in bytes */\n var max_blindex = 0; /* index of last bit length code of non zero freq */\n\n /* Build the Huffman trees unless a stored block is forced */\n if (s.level > 0) {\n\n /* Check if the file is binary or text */\n if (s.strm.data_type === Z_UNKNOWN) {\n s.strm.data_type = detect_data_type(s);\n }\n\n /* Construct the literal and distance trees */\n build_tree(s, s.l_desc);\n // Tracev((stderr, \"\\nlit data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n\n build_tree(s, s.d_desc);\n // Tracev((stderr, \"\\ndist data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n /* At this point, opt_len and static_len are the total bit lengths of\n * the compressed block data, excluding the tree representations.\n */\n\n /* Build the bit length tree for the above two trees, and get the index\n * in bl_order of the last bit length code to send.\n */\n max_blindex = build_bl_tree(s);\n\n /* Determine the best encoding. Compute the block lengths in bytes. 
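// --- Illustrative sketch, not part of the library code above ---------------
// The ">>> 3" conversions just below are ceil((bits + 3) / 8): three header
// bits for the block type, rounded up to whole bytes. A worked example:
function exampleBlockBytes(bitLen) {
  return (bitLen + 3 + 7) >>> 3;
}
// exampleBlockBytes(1000) -> 126. The stored alternative costs the raw data
// plus 4 bytes of length/complement, which is what the
// "stored_len + 4 <= opt_lenb" test further down weighs against.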
*/\n opt_lenb = (s.opt_len + 3 + 7) >>> 3;\n static_lenb = (s.static_len + 3 + 7) >>> 3;\n\n // Tracev((stderr, \"\\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u \",\n // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,\n // s->last_lit));\n\n if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }\n\n } else {\n // Assert(buf != (char*)0, \"lost buf\");\n opt_lenb = static_lenb = stored_len + 5; /* force a stored block */\n }\n\n if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {\n /* 4: two words for the lengths */\n\n /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.\n * Otherwise we can't have processed more than WSIZE input bytes since\n * the last block flush, because compression would have been\n * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to\n * transform a block into a stored block.\n */\n _tr_stored_block(s, buf, stored_len, last);\n\n } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {\n\n send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);\n compress_block(s, static_ltree, static_dtree);\n\n } else {\n send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);\n send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);\n compress_block(s, s.dyn_ltree, s.dyn_dtree);\n }\n // Assert (s->compressed_len == s->bits_sent, \"bad compressed size\");\n /* The above check is made mod 2^32, for files larger than 512 MB\n * and uLong implemented on 32 bits.\n */\n init_block(s);\n\n if (last) {\n bi_windup(s);\n }\n // Tracev((stderr,\"\\ncomprlen %lu(%lu) \", s->compressed_len>>3,\n // s->compressed_len-7*last));\n}\n\n/* ===========================================================================\n * Save the match info and tally the frequency counts. Return true if\n * the current block must be flushed.\n */\nfunction _tr_tally(s, dist, lc)\n// deflate_state *s;\n// unsigned dist; /* distance of matched string */\n// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */\n{\n //var out_length, in_length, dcode;\n\n s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;\n s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;\n\n s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;\n s.last_lit++;\n\n if (dist === 0) {\n /* lc is the unmatched char */\n s.dyn_ltree[lc * 2]/*.Freq*/++;\n } else {\n s.matches++;\n /* Here, lc is the match length - MIN_MATCH */\n dist--; /* dist = match distance - 1 */\n //Assert((ush)dist < (ush)MAX_DIST(s) &&\n // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&\n // (ush)d_code(dist) < (ush)D_CODES, \"_tr_tally: bad match\");\n\n s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;\n s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;\n }\n\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n\n//#ifdef TRUNCATE_BLOCK\n// /* Try to guess if it is profitable to stop the current block here */\n// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {\n// /* Compute an upper bound for the compressed length */\n// out_length = s.last_lit*8;\n// in_length = s.strstart - s.block_start;\n//\n// for (dcode = 0; dcode < D_CODES; dcode++) {\n// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);\n// }\n// out_length >>>= 3;\n// //Tracev((stderr,\"\\nlast_lit %u, in %ld, out ~%ld(%ld%%) \",\n// // s->last_lit, in_length, out_length,\n// // 100L - out_length*100L/in_length));\n// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {\n// return true;\n// }\n// }\n//#endif\n\n return (s.last_lit === s.lit_bufsize - 1);\n /* We avoid equality with lit_bufsize because of wraparound at 64K\n * on 16 bit machines and because stored blocks are restricted to\n * 64K-1 bytes.\n */\n}\n\nexports._tr_init = _tr_init;\nexports._tr_stored_block = _tr_stored_block;\nexports._tr_flush_block = _tr_flush_block;\nexports._tr_tally = _tr_tally;\nexports._tr_align = _tr_align;\n","'use strict';\n\n// Note: adler32 takes 12% for level 0 and 2% for level 6.\n// It isn't worth it to make additional optimizations as in original.\n// Small size is preferable.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nfunction adler32(adler, buf, len, pos) {\n var s1 = (adler & 0xffff) |0,\n s2 = ((adler >>> 16) & 0xffff) |0,\n n = 0;\n\n while (len !== 0) {\n // Set limit ~ twice less than 5552, to keep\n // s2 in 31-bits, because we force signed ints.\n // in other case %= will fail.\n n = len > 2000 ? 2000 : len;\n len -= n;\n\n do {\n s1 = (s1 + buf[pos++]) |0;\n s2 = (s2 + s1) |0;\n } while (--n);\n\n s1 %= 65521;\n s2 %= 65521;\n }\n\n return (s1 | (s2 << 16)) |0;\n}\n\n\nmodule.exports = adler32;\n","'use strict';\n\n// Note: we can't get significant speed boost here.\n// So write code to minimize size - no pregenerated tables\n// and array tools dependencies.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. 
The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n// Use ordinary array, since untyped makes no boost here\nfunction makeTable() {\n var c, table = [];\n\n for (var n = 0; n < 256; n++) {\n c = n;\n for (var k = 0; k < 8; k++) {\n c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));\n }\n table[n] = c;\n }\n\n return table;\n}\n\n// Create table on load. Just 255 signed longs. Not a problem.\nvar crcTable = makeTable();\n\n\nfunction crc32(crc, buf, len, pos) {\n var t = crcTable,\n end = pos + len;\n\n crc ^= -1;\n\n for (var i = pos; i < end; i++) {\n crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];\n }\n\n return (crc ^ (-1)); // >>> 0;\n}\n\n\nmodule.exports = crc32;\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\nvar trees = require('./trees');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar msg = require('./messages');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\nvar Z_NO_FLUSH = 0;\nvar Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\nvar Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\n//var Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. 
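// --- Illustrative sketch, not part of the library code above ---------------
// Both checksum modules required above are incremental: pass the previous
// value plus the next chunk of bytes. Their initial values differ (adler32
// starts from 1, crc32 from 0), and ">>> 0" converts the signed crc32 result
// to the usual unsigned form. Assumes the functions behave as defined earlier.
var exampleBytes = new Uint8Array([104, 101, 108, 108, 111]); // "hello"
var exampleAdler = adler32(1, exampleBytes, exampleBytes.length, 0); // 0x062C0215
var exampleCrc = crc32(0, exampleBytes, exampleBytes.length, 0) >>> 0; // standard CRC-32 of "hello"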
Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\n//var Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\n//var Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n\n/* compression levels */\n//var Z_NO_COMPRESSION = 0;\n//var Z_BEST_SPEED = 1;\n//var Z_BEST_COMPRESSION = 9;\nvar Z_DEFAULT_COMPRESSION = -1;\n\n\nvar Z_FILTERED = 1;\nvar Z_HUFFMAN_ONLY = 2;\nvar Z_RLE = 3;\nvar Z_FIXED = 4;\nvar Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\n//var Z_BINARY = 0;\n//var Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n/*============================================================================*/\n\n\nvar MAX_MEM_LEVEL = 9;\n/* Maximum value for memLevel in deflateInit2 */\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_MEM_LEVEL = 8;\n\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\nvar D_CODES = 30;\n/* number of distance codes */\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\nvar MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);\n\nvar PRESET_DICT = 0x20;\n\nvar INIT_STATE = 42;\nvar EXTRA_STATE = 69;\nvar NAME_STATE = 73;\nvar COMMENT_STATE = 91;\nvar HCRC_STATE = 103;\nvar BUSY_STATE = 113;\nvar FINISH_STATE = 666;\n\nvar BS_NEED_MORE = 1; /* block not completed, need more input or more output */\nvar BS_BLOCK_DONE = 2; /* block flush performed */\nvar BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */\nvar BS_FINISH_DONE = 4; /* finish done, accept no more input or output */\n\nvar OS_CODE = 0x03; // Unix :) . Don't detect, use this default.\n\nfunction err(strm, errorCode) {\n strm.msg = msg[errorCode];\n return errorCode;\n}\n\nfunction rank(f) {\n return ((f) << 1) - ((f) > 4 ? 9 : 0);\n}\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n\n/* =========================================================================\n * Flush as much pending output as possible. All deflate() output goes\n * through this function so some applications may wish to modify it\n * to avoid allocating a large strm->output buffer and copying into it.\n * (See also read_buf()).\n */\nfunction flush_pending(strm) {\n var s = strm.state;\n\n //_tr_flush_bits(s);\n var len = s.pending;\n if (len > strm.avail_out) {\n len = strm.avail_out;\n }\n if (len === 0) { return; }\n\n utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out);\n strm.next_out += len;\n s.pending_out += len;\n strm.total_out += len;\n strm.avail_out -= len;\n s.pending -= len;\n if (s.pending === 0) {\n s.pending_out = 0;\n }\n}\n\n\nfunction flush_block_only(s, last) {\n trees._tr_flush_block(s, (s.block_start >= 0 ? 
s.block_start : -1), s.strstart - s.block_start, last);\n s.block_start = s.strstart;\n flush_pending(s.strm);\n}\n\n\nfunction put_byte(s, b) {\n s.pending_buf[s.pending++] = b;\n}\n\n\n/* =========================================================================\n * Put a short in the pending buffer. The 16-bit value is put in MSB order.\n * IN assertion: the stream state is correct and there is enough room in\n * pending_buf.\n */\nfunction putShortMSB(s, b) {\n// put_byte(s, (Byte)(b >> 8));\n// put_byte(s, (Byte)(b & 0xff));\n s.pending_buf[s.pending++] = (b >>> 8) & 0xff;\n s.pending_buf[s.pending++] = b & 0xff;\n}\n\n\n/* ===========================================================================\n * Read a new buffer from the current input stream, update the adler32\n * and total number of bytes read. All deflate() input goes through\n * this function so some applications may wish to modify it to avoid\n * allocating a large strm->input buffer and copying from it.\n * (See also flush_pending()).\n */\nfunction read_buf(strm, buf, start, size) {\n var len = strm.avail_in;\n\n if (len > size) { len = size; }\n if (len === 0) { return 0; }\n\n strm.avail_in -= len;\n\n // zmemcpy(buf, strm->next_in, len);\n utils.arraySet(buf, strm.input, strm.next_in, len, start);\n if (strm.state.wrap === 1) {\n strm.adler = adler32(strm.adler, buf, len, start);\n }\n\n else if (strm.state.wrap === 2) {\n strm.adler = crc32(strm.adler, buf, len, start);\n }\n\n strm.next_in += len;\n strm.total_in += len;\n\n return len;\n}\n\n\n/* ===========================================================================\n * Set match_start to the longest match starting at the given string and\n * return its length. Matches shorter or equal to prev_length are discarded,\n * in which case the result is equal to prev_length and match_start is\n * garbage.\n * IN assertions: cur_match is the head of the hash chain for the current\n * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1\n * OUT assertion: the match length is not greater than s->lookahead.\n */\nfunction longest_match(s, cur_match) {\n var chain_length = s.max_chain_length; /* max hash chain length */\n var scan = s.strstart; /* current string */\n var match; /* matched string */\n var len; /* length of current match */\n var best_len = s.prev_length; /* best match length so far */\n var nice_match = s.nice_match; /* stop if match long enough */\n var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?\n s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;\n\n var _win = s.window; // shortcut\n\n var wmask = s.w_mask;\n var prev = s.prev;\n\n /* Stop when cur_match becomes <= limit. To simplify the code,\n * we prevent matches with the string of window index 0.\n */\n\n var strend = s.strstart + MAX_MATCH;\n var scan_end1 = _win[scan + best_len - 1];\n var scan_end = _win[scan + best_len];\n\n /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.\n * It is easy to get rid of this optimization if necessary.\n */\n // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, \"Code too clever\");\n\n /* Do not waste too much time if we already have a good match: */\n if (s.prev_length >= s.good_match) {\n chain_length >>= 2;\n }\n /* Do not look for matches beyond the end of the input. 
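// --- Illustrative sketch, not part of the library code above ---------------
// What longest_match() computes, as a naive reference without hash chains or
// loop unrolling: the longest run at `start` that also occurs earlier in the
// window. Names are local to this example; `maxLen` plays the role of MAX_MATCH.
function exampleNaiveLongestMatch(bytes, start, maxLen) {
  var bestLen = 0, bestDist = 0;
  for (var cand = 0; cand < start; cand++) {
    var len = 0;
    while (len < maxLen && start + len < bytes.length &&
           bytes[cand + len] === bytes[start + len]) {
      len++;
    }
    if (len > bestLen) { bestLen = len; bestDist = start - cand; }
  }
  return { length: bestLen, distance: bestDist };
}
// exampleNaiveLongestMatch([1, 2, 3, 1, 2, 3, 4], 3, 258) -> { length: 3, distance: 3 }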
This is necessary\n * to make deflate deterministic.\n */\n if (nice_match > s.lookahead) { nice_match = s.lookahead; }\n\n // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, \"need lookahead\");\n\n do {\n // Assert(cur_match < s->strstart, \"no future\");\n match = cur_match;\n\n /* Skip to next match if the match length cannot increase\n * or if the match length is less than 2. Note that the checks below\n * for insufficient lookahead only occur occasionally for performance\n * reasons. Therefore uninitialized memory will be accessed, and\n * conditional jumps will be made that depend on those values.\n * However the length of the match is limited to the lookahead, so\n * the output of deflate is not affected by the uninitialized values.\n */\n\n if (_win[match + best_len] !== scan_end ||\n _win[match + best_len - 1] !== scan_end1 ||\n _win[match] !== _win[scan] ||\n _win[++match] !== _win[scan + 1]) {\n continue;\n }\n\n /* The check at best_len-1 can be removed because it will be made\n * again later. (This heuristic is not always a win.)\n * It is not necessary to compare scan[2] and match[2] since they\n * are always equal when the other bytes match, given that\n * the hash keys are equal and that HASH_BITS >= 8.\n */\n scan += 2;\n match++;\n // Assert(*scan == *match, \"match[2]?\");\n\n /* We check for insufficient lookahead only every 8th comparison;\n * the 256th check will be made at strstart+258.\n */\n do {\n /*jshint noempty:false*/\n } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n scan < strend);\n\n // Assert(scan <= s->window+(unsigned)(s->window_size-1), \"wild scan\");\n\n len = MAX_MATCH - (strend - scan);\n scan = strend - MAX_MATCH;\n\n if (len > best_len) {\n s.match_start = cur_match;\n best_len = len;\n if (len >= nice_match) {\n break;\n }\n scan_end1 = _win[scan + best_len - 1];\n scan_end = _win[scan + best_len];\n }\n } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);\n\n if (best_len <= s.lookahead) {\n return best_len;\n }\n return s.lookahead;\n}\n\n\n/* ===========================================================================\n * Fill the window when the lookahead becomes insufficient.\n * Updates strstart and lookahead.\n *\n * IN assertion: lookahead < MIN_LOOKAHEAD\n * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD\n * At least one byte has been read, or avail_in == 0; reads are\n * performed for at least two bytes (required for the zip translate_eol\n * option -- not supported here).\n */\nfunction fill_window(s) {\n var _w_size = s.w_size;\n var p, n, m, more, str;\n\n //Assert(s->lookahead < MIN_LOOKAHEAD, \"already enough lookahead\");\n\n do {\n more = s.window_size - s.lookahead - s.strstart;\n\n // JS ints have 32 bit, block below not needed\n /* Deal with !@#$% 64K limit: */\n //if (sizeof(int) <= 2) {\n // if (more == 0 && s->strstart == 0 && s->lookahead == 0) {\n // more = wsize;\n //\n // } else if (more == (unsigned)(-1)) {\n // /* Very unlikely, but possible on 16 bit machine if\n // * strstart == 0 && lookahead == 1 (input done a byte at time)\n // */\n // more--;\n // }\n //}\n\n\n /* If the window is almost full and there is insufficient lookahead,\n * move the upper half to the lower one to make room in the upper half.\n */\n if (s.strstart >= _w_size 
+ (_w_size - MIN_LOOKAHEAD)) {\n\n utils.arraySet(s.window, s.window, _w_size, _w_size, 0);\n s.match_start -= _w_size;\n s.strstart -= _w_size;\n /* we now have strstart >= MAX_DIST */\n s.block_start -= _w_size;\n\n /* Slide the hash table (could be avoided with 32 bit values\n at the expense of memory usage). We slide even when level == 0\n to keep the hash table consistent if we switch back to level > 0\n later. (Using level 0 permanently is not an optimal usage of\n zlib, so we don't care about this pathological case.)\n */\n\n n = s.hash_size;\n p = n;\n do {\n m = s.head[--p];\n s.head[p] = (m >= _w_size ? m - _w_size : 0);\n } while (--n);\n\n n = _w_size;\n p = n;\n do {\n m = s.prev[--p];\n s.prev[p] = (m >= _w_size ? m - _w_size : 0);\n /* If n is not on any hash chain, prev[n] is garbage but\n * its value will never be used.\n */\n } while (--n);\n\n more += _w_size;\n }\n if (s.strm.avail_in === 0) {\n break;\n }\n\n /* If there was no sliding:\n * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&\n * more == window_size - lookahead - strstart\n * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)\n * => more >= window_size - 2*WSIZE + 2\n * In the BIG_MEM or MMAP case (not yet supported),\n * window_size == input_size + MIN_LOOKAHEAD &&\n * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.\n * Otherwise, window_size == 2*WSIZE so more >= 2.\n * If there was sliding, more >= WSIZE. So in all cases, more >= 2.\n */\n //Assert(more >= 2, \"more < 2\");\n n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);\n s.lookahead += n;\n\n /* Initialize the hash value now that we have some input: */\n if (s.lookahead + s.insert >= MIN_MATCH) {\n str = s.strstart - s.insert;\n s.ins_h = s.window[str];\n\n /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask;\n//#if MIN_MATCH != 3\n// Call update_hash() MIN_MATCH-3 more times\n//#endif\n while (s.insert) {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = str;\n str++;\n s.insert--;\n if (s.lookahead + s.insert < MIN_MATCH) {\n break;\n }\n }\n }\n /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,\n * but this is not important since only literal bytes will be emitted.\n */\n\n } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);\n\n /* If the WIN_INIT bytes after the end of the current data have never been\n * written, then zero those bytes in order to avoid memory check reports of\n * the use of uninitialized (or uninitialised as Julian writes) bytes by\n * the longest match routines. Update the high water mark for the next\n * time through here. 
WIN_INIT is set to MAX_MATCH since the longest match\n * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.\n */\n// if (s.high_water < s.window_size) {\n// var curr = s.strstart + s.lookahead;\n// var init = 0;\n//\n// if (s.high_water < curr) {\n// /* Previous high water mark below current data -- zero WIN_INIT\n// * bytes or up to end of window, whichever is less.\n// */\n// init = s.window_size - curr;\n// if (init > WIN_INIT)\n// init = WIN_INIT;\n// zmemzero(s->window + curr, (unsigned)init);\n// s->high_water = curr + init;\n// }\n// else if (s->high_water < (ulg)curr + WIN_INIT) {\n// /* High water mark at or above current data, but below current data\n// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up\n// * to end of window, whichever is less.\n// */\n// init = (ulg)curr + WIN_INIT - s->high_water;\n// if (init > s->window_size - s->high_water)\n// init = s->window_size - s->high_water;\n// zmemzero(s->window + s->high_water, (unsigned)init);\n// s->high_water += init;\n// }\n// }\n//\n// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,\n// \"not enough room for search\");\n}\n\n/* ===========================================================================\n * Copy without compression as much as possible from the input stream, return\n * the current block state.\n * This function does not insert new strings in the dictionary since\n * uncompressible data is probably not useful. This function is used\n * only for the level=0 compression option.\n * NOTE: this function should be optimized to avoid extra copying from\n * window to pending_buf.\n */\nfunction deflate_stored(s, flush) {\n /* Stored blocks are limited to 0xffff bytes, pending_buf is limited\n * to pending_buf_size, and each stored block has a 5 byte header:\n */\n var max_block_size = 0xffff;\n\n if (max_block_size > s.pending_buf_size - 5) {\n max_block_size = s.pending_buf_size - 5;\n }\n\n /* Copy as much as possible from input to output: */\n for (;;) {\n /* Fill the window as much as possible: */\n if (s.lookahead <= 1) {\n\n //Assert(s->strstart < s->w_size+MAX_DIST(s) ||\n // s->block_start >= (long)s->w_size, \"slide too late\");\n// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||\n// s.block_start >= s.w_size)) {\n// throw new Error(\"slide too late\");\n// }\n\n fill_window(s);\n if (s.lookahead === 0 && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n\n if (s.lookahead === 0) {\n break;\n }\n /* flush the current block */\n }\n //Assert(s->block_start >= 0L, \"block gone\");\n// if (s.block_start < 0) throw new Error(\"block gone\");\n\n s.strstart += s.lookahead;\n s.lookahead = 0;\n\n /* Emit a stored block if pending_buf will be full: */\n var max_start = s.block_start + max_block_size;\n\n if (s.strstart === 0 || s.strstart >= max_start) {\n /* strstart == 0 is possible when wraparound on 16-bit machine */\n s.lookahead = s.strstart - max_start;\n s.strstart = max_start;\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n\n\n }\n /* Flush if we may have to slide, otherwise block_start may become\n * negative and the data will be gone:\n */\n if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n\n s.insert = 0;\n\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out 
=== 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n\n if (s.strstart > s.block_start) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_NEED_MORE;\n}\n\n/* ===========================================================================\n * Compress as much as possible from the input stream, return the current\n * block state.\n * This function does not perform lazy evaluation of matches and inserts\n * new strings in the dictionary only for unmatched strings or for short\n * matches. It is used only for the fast compression options.\n */\nfunction deflate_fast(s, flush) {\n var hash_head; /* head of the hash chain */\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) {\n break; /* flush the current block */\n }\n }\n\n /* Insert the string window[strstart .. strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n * At this point we have always match_length < MIN_MATCH\n */\n if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n }\n if (s.match_length >= MIN_MATCH) {\n // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only\n\n /*** _tr_tally_dist(s, s.strstart - s.match_start,\n s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n\n /* Insert new strings in the hash table only if the match length\n * is not too large. 
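// --- Illustrative sketch, not part of the library code above ---------------
// The INSERT_STRING expansion above is a 3-byte rolling hash: shift in one new
// byte, mask to the table size. With a shift of one third of the mask width,
// a byte drops out of the hash after MIN_MATCH updates. The sizes below are
// made up for illustration and are not taken from the code above.
function exampleHash3(b0, b1, b2) {
  var shift = 5, mask = (1 << 15) - 1;   // illustrative values only
  var h = 0;
  h = ((h << shift) ^ b0) & mask;
  h = ((h << shift) ^ b1) & mask;
  h = ((h << shift) ^ b2) & mask;
  return h;
}
// Any prior state would be shifted left by 15 bits after three updates and
// removed by the mask, so the bucket depends only on the last three bytes.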
This saves time but degrades compression.\n */\n if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {\n s.match_length--; /* string at strstart already in table */\n do {\n s.strstart++;\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n /* strstart never exceeds WSIZE-MAX_MATCH, so there are\n * always MIN_MATCH bytes ahead.\n */\n } while (--s.match_length !== 0);\n s.strstart++;\n } else\n {\n s.strstart += s.match_length;\n s.match_length = 0;\n s.ins_h = s.window[s.strstart];\n /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;\n\n//#if MIN_MATCH != 3\n// Call UPDATE_HASH() MIN_MATCH-3 more times\n//#endif\n /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not\n * matter since it will be recomputed at next deflate call.\n */\n }\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s.window[s.strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * Same as above, but achieves better compression. We use a lazy\n * evaluation for matches: a match is finally adopted only if there is\n * no better match at the next window position.\n */\nfunction deflate_slow(s, flush) {\n var hash_head; /* head of hash chain */\n var bflush; /* set if current block must be flushed */\n\n var max_insert;\n\n /* Process the input block. */\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* Insert the string window[strstart .. 
strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n */\n s.prev_length = s.match_length;\n s.prev_match = s.match_start;\n s.match_length = MIN_MATCH - 1;\n\n if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&\n s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n\n if (s.match_length <= 5 &&\n (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {\n\n /* If prev_match is also MIN_MATCH, match_start is garbage\n * but we will ignore the current match anyway.\n */\n s.match_length = MIN_MATCH - 1;\n }\n }\n /* If there was a match at the previous step and the current\n * match is not better, output the previous match:\n */\n if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {\n max_insert = s.strstart + s.lookahead - MIN_MATCH;\n /* Do not insert strings in hash table beyond this. */\n\n //check_match(s, s.strstart-1, s.prev_match, s.prev_length);\n\n /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,\n s.prev_length - MIN_MATCH, bflush);***/\n bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);\n /* Insert in hash table all strings up to the end of the match.\n * strstart-1 and strstart are already inserted. If there is not\n * enough lookahead, the last two strings are not inserted in\n * the hash table.\n */\n s.lookahead -= s.prev_length - 1;\n s.prev_length -= 2;\n do {\n if (++s.strstart <= max_insert) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n } while (--s.prev_length !== 0);\n s.match_available = 0;\n s.match_length = MIN_MATCH - 1;\n s.strstart++;\n\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n } else if (s.match_available) {\n /* If there was no match at the previous position, output a\n * single literal. 
If there was a match but the current match\n * is longer, truncate the previous match to a single literal.\n */\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n if (bflush) {\n /*** FLUSH_BLOCK_ONLY(s, 0) ***/\n flush_block_only(s, false);\n /***/\n }\n s.strstart++;\n s.lookahead--;\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n } else {\n /* There is no previous match to compare with, wait for\n * the next step to decide.\n */\n s.match_available = 1;\n s.strstart++;\n s.lookahead--;\n }\n }\n //Assert (flush != Z_NO_FLUSH, \"no flush?\");\n if (s.match_available) {\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n s.match_available = 0;\n }\n s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_BLOCK_DONE;\n}\n\n\n/* ===========================================================================\n * For Z_RLE, simply look for runs of bytes, generate matches only of distance\n * one. Do not maintain a hash table. (It will be regenerated if this run of\n * deflate switches away from Z_RLE.)\n */\nfunction deflate_rle(s, flush) {\n var bflush; /* set if current block must be flushed */\n var prev; /* byte at distance one to match */\n var scan, strend; /* scan goes up to strend for length of run */\n\n var _win = s.window;\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. 
We need MAX_MATCH bytes\n * for the longest run, plus one for the unrolled loop.\n */\n if (s.lookahead <= MAX_MATCH) {\n fill_window(s);\n if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* See how many times the previous byte repeats */\n s.match_length = 0;\n if (s.lookahead >= MIN_MATCH && s.strstart > 0) {\n scan = s.strstart - 1;\n prev = _win[scan];\n if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {\n strend = s.strstart + MAX_MATCH;\n do {\n /*jshint noempty:false*/\n } while (prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n scan < strend);\n s.match_length = MAX_MATCH - (strend - scan);\n if (s.match_length > s.lookahead) {\n s.match_length = s.lookahead;\n }\n }\n //Assert(scan <= s->window+(uInt)(s->window_size-1), \"wild scan\");\n }\n\n /* Emit match if have run of MIN_MATCH or longer, else emit literal */\n if (s.match_length >= MIN_MATCH) {\n //check_match(s, s.strstart, s.strstart - 1, s.match_length);\n\n /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n s.strstart += s.match_length;\n s.match_length = 0;\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.\n * (It will be regenerated if this run of deflate switches away from Huffman.)\n */\nfunction deflate_huff(s, flush) {\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we have a literal to write. 
*/\n if (s.lookahead === 0) {\n fill_window(s);\n if (s.lookahead === 0) {\n if (flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n break; /* flush the current block */\n }\n }\n\n /* Output a literal byte */\n s.match_length = 0;\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n s.lookahead--;\n s.strstart++;\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* Values for max_lazy_match, good_match and max_chain_length, depending on\n * the desired pack level (0..9). The values given below have been tuned to\n * exclude worst case performance for pathological files. Better values may be\n * found for specific files.\n */\nfunction Config(good_length, max_lazy, nice_length, max_chain, func) {\n this.good_length = good_length;\n this.max_lazy = max_lazy;\n this.nice_length = nice_length;\n this.max_chain = max_chain;\n this.func = func;\n}\n\nvar configuration_table;\n\nconfiguration_table = [\n /* good lazy nice chain */\n new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */\n new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */\n new Config(4, 5, 16, 8, deflate_fast), /* 2 */\n new Config(4, 6, 32, 32, deflate_fast), /* 3 */\n\n new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */\n new Config(8, 16, 32, 32, deflate_slow), /* 5 */\n new Config(8, 16, 128, 128, deflate_slow), /* 6 */\n new Config(8, 32, 128, 256, deflate_slow), /* 7 */\n new Config(32, 128, 258, 1024, deflate_slow), /* 8 */\n new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */\n];\n\n\n/* ===========================================================================\n * Initialize the \"longest match\" routines for a new zlib stream\n */\nfunction lm_init(s) {\n s.window_size = 2 * s.w_size;\n\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n\n /* Set the default configuration parameters:\n */\n s.max_lazy_match = configuration_table[s.level].max_lazy;\n s.good_match = configuration_table[s.level].good_length;\n s.nice_match = configuration_table[s.level].nice_length;\n s.max_chain_length = configuration_table[s.level].max_chain;\n\n s.strstart = 0;\n s.block_start = 0;\n s.lookahead = 0;\n s.insert = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n s.ins_h = 0;\n}\n\n\nfunction DeflateState() {\n this.strm = null; /* pointer back to this zlib stream */\n this.status = 0; /* as the name implies */\n this.pending_buf = null; /* output still pending */\n this.pending_buf_size = 0; /* size of pending_buf */\n this.pending_out = 0; /* next pending byte to output to the stream */\n this.pending = 0; /* nb of bytes in the pending buffer */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.gzhead = null; /* gzip header information to write */\n this.gzindex = 0; /* where in extra, name, or comment */\n this.method = Z_DEFLATED; /* can only be DEFLATED */\n this.last_flush = -1; /* value of flush param for 
previous deflate call */\n\n this.w_size = 0; /* LZ77 window size (32K by default) */\n this.w_bits = 0; /* log2(w_size) (8..16) */\n this.w_mask = 0; /* w_size - 1 */\n\n this.window = null;\n /* Sliding window. Input bytes are read into the second half of the window,\n * and move to the first half later to keep a dictionary of at least wSize\n * bytes. With this organization, matches are limited to a distance of\n * wSize-MAX_MATCH bytes, but this ensures that IO is always\n * performed with a length multiple of the block size.\n */\n\n this.window_size = 0;\n /* Actual size of window: 2*wSize, except when the user input buffer\n * is directly used as sliding window.\n */\n\n this.prev = null;\n /* Link to older string with same hash index. To limit the size of this\n * array to 64K, this link is maintained only for the last 32K strings.\n * An index in this array is thus a window index modulo 32K.\n */\n\n this.head = null; /* Heads of the hash chains or NIL. */\n\n this.ins_h = 0; /* hash index of string to be inserted */\n this.hash_size = 0; /* number of elements in hash table */\n this.hash_bits = 0; /* log2(hash_size) */\n this.hash_mask = 0; /* hash_size-1 */\n\n this.hash_shift = 0;\n /* Number of bits by which ins_h must be shifted at each input\n * step. It must be such that after MIN_MATCH steps, the oldest\n * byte no longer takes part in the hash key, that is:\n * hash_shift * MIN_MATCH >= hash_bits\n */\n\n this.block_start = 0;\n /* Window position at the beginning of the current output block. Gets\n * negative when the window is moved backwards.\n */\n\n this.match_length = 0; /* length of best match */\n this.prev_match = 0; /* previous match */\n this.match_available = 0; /* set if previous match exists */\n this.strstart = 0; /* start of string to insert */\n this.match_start = 0; /* start of matching string */\n this.lookahead = 0; /* number of valid bytes ahead in window */\n\n this.prev_length = 0;\n /* Length of the best match at previous step. Matches not greater than this\n * are discarded. This is used in the lazy match evaluation.\n */\n\n this.max_chain_length = 0;\n /* To speed up deflation, hash chains are never searched beyond this\n * length. A higher limit improves compression ratio but degrades the\n * speed.\n */\n\n this.max_lazy_match = 0;\n /* Attempt to find a better match only when the current match is strictly\n * smaller than this value. This mechanism is used only for compression\n * levels >= 4.\n */\n // That's alias to max_lazy_match, don't use directly\n //this.max_insert_length = 0;\n /* Insert new strings in the hash table only if the match length is not\n * greater than this length. 
This saves time but degrades compression.\n * max_insert_length is used only for compression levels <= 3.\n */\n\n this.level = 0; /* compression level (1..9) */\n this.strategy = 0; /* favor or force Huffman coding*/\n\n this.good_match = 0;\n /* Use a faster search when the previous match is longer than this */\n\n this.nice_match = 0; /* Stop searching when current match exceeds this */\n\n /* used by trees.c: */\n\n /* Didn't use ct_data typedef below to suppress compiler warning */\n\n // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */\n // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */\n // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */\n\n // Use flat array of DOUBLE size, with interleaved fata,\n // because JS does not support effective\n this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2);\n this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2);\n this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2);\n zero(this.dyn_ltree);\n zero(this.dyn_dtree);\n zero(this.bl_tree);\n\n this.l_desc = null; /* desc. for literal tree */\n this.d_desc = null; /* desc. for distance tree */\n this.bl_desc = null; /* desc. for bit length tree */\n\n //ush bl_count[MAX_BITS+1];\n this.bl_count = new utils.Buf16(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */\n this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */\n zero(this.heap);\n\n this.heap_len = 0; /* number of elements in the heap */\n this.heap_max = 0; /* element of largest frequency */\n /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.\n * The same heap array is used to build all trees.\n */\n\n this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1];\n zero(this.depth);\n /* Depth of each subtree used as tie breaker for trees of equal frequency\n */\n\n this.l_buf = 0; /* buffer index for literals or lengths */\n\n this.lit_bufsize = 0;\n /* Size of match buffer for literals/lengths. There are 4 reasons for\n * limiting lit_bufsize to 64K:\n * - frequencies can be kept in 16 bit counters\n * - if compression is not successful for the first block, all input\n * data is still in the window so we can still emit a stored block even\n * when input comes from standard input. (This can also be done for\n * all blocks if lit_bufsize is not greater than 32K.)\n * - if compression is not successful for a file smaller than 64K, we can\n * even emit a stored file instead of a stored block (saving 5 bytes).\n * This is applicable only for zip (not gzip or zlib).\n * - creating new Huffman trees less frequently may not provide fast\n * adaptation to changes in the input data statistics. (Take for\n * example a binary file with poorly compressible code followed by\n * a highly compressible string table.) Smaller buffer sizes give\n * fast adaptation but have of course the overhead of transmitting\n * trees more frequently.\n * - I can't count above 4\n */\n\n this.last_lit = 0; /* running index in l_buf */\n\n this.d_buf = 0;\n /* Buffer index for distances. To simplify the code, d_buf and l_buf have\n * the same number of elements. 
To use different lengths, an extra flag\n * array would be necessary.\n */\n\n this.opt_len = 0; /* bit length of current block with optimal trees */\n this.static_len = 0; /* bit length of current block with static trees */\n this.matches = 0; /* number of string matches in current block */\n this.insert = 0; /* bytes at end of window left to insert */\n\n\n this.bi_buf = 0;\n /* Output buffer. bits are inserted starting at the bottom (least\n * significant bits).\n */\n this.bi_valid = 0;\n /* Number of valid bits in bi_buf. All bits above the last valid bit\n * are always zero.\n */\n\n // Used for window memory init. We safely ignore it for JS. That makes\n // sense only for pointers and memory check tools.\n //this.high_water = 0;\n /* High water mark offset in window for initialized bytes -- bytes above\n * this are set to zero in order to avoid memory check warnings when\n * longest match routines access bytes past the input. This is then\n * updated to the new high water mark.\n */\n}\n\n\nfunction deflateResetKeep(strm) {\n var s;\n\n if (!strm || !strm.state) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.total_in = strm.total_out = 0;\n strm.data_type = Z_UNKNOWN;\n\n s = strm.state;\n s.pending = 0;\n s.pending_out = 0;\n\n if (s.wrap < 0) {\n s.wrap = -s.wrap;\n /* was made negative by deflate(..., Z_FINISH); */\n }\n s.status = (s.wrap ? INIT_STATE : BUSY_STATE);\n strm.adler = (s.wrap === 2) ?\n 0 // crc32(0, Z_NULL, 0)\n :\n 1; // adler32(0, Z_NULL, 0)\n s.last_flush = Z_NO_FLUSH;\n trees._tr_init(s);\n return Z_OK;\n}\n\n\nfunction deflateReset(strm) {\n var ret = deflateResetKeep(strm);\n if (ret === Z_OK) {\n lm_init(strm.state);\n }\n return ret;\n}\n\n\nfunction deflateSetHeader(strm, head) {\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; }\n strm.state.gzhead = head;\n return Z_OK;\n}\n\n\nfunction deflateInit2(strm, level, method, windowBits, memLevel, strategy) {\n if (!strm) { // === Z_NULL\n return Z_STREAM_ERROR;\n }\n var wrap = 1;\n\n if (level === Z_DEFAULT_COMPRESSION) {\n level = 6;\n }\n\n if (windowBits < 0) { /* suppress zlib wrapper */\n wrap = 0;\n windowBits = -windowBits;\n }\n\n else if (windowBits > 15) {\n wrap = 2; /* write gzip wrapper instead */\n windowBits -= 16;\n }\n\n\n if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED ||\n windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||\n strategy < 0 || strategy > Z_FIXED) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n\n if (windowBits === 8) {\n windowBits = 9;\n }\n /* until 256-byte window bug fixed */\n\n var s = new DeflateState();\n\n strm.state = s;\n s.strm = strm;\n\n s.wrap = wrap;\n s.gzhead = null;\n s.w_bits = windowBits;\n s.w_size = 1 << s.w_bits;\n s.w_mask = s.w_size - 1;\n\n s.hash_bits = memLevel + 7;\n s.hash_size = 1 << s.hash_bits;\n s.hash_mask = s.hash_size - 1;\n s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);\n\n s.window = new utils.Buf8(s.w_size * 2);\n s.head = new utils.Buf16(s.hash_size);\n s.prev = new utils.Buf16(s.w_size);\n\n // Don't need mem init magic for JS.\n //s.high_water = 0; /* nothing written to s->window yet */\n\n s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */\n\n s.pending_buf_size = s.lit_bufsize * 4;\n\n //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);\n //s->pending_buf = (uchf *) overlay;\n s.pending_buf = new utils.Buf8(s.pending_buf_size);\n\n // It is offset from `s.pending_buf` (size is `s.lit_bufsize 
* 2`)\n //s->d_buf = overlay + s->lit_bufsize/sizeof(ush);\n s.d_buf = 1 * s.lit_bufsize;\n\n //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;\n s.l_buf = (1 + 2) * s.lit_bufsize;\n\n s.level = level;\n s.strategy = strategy;\n s.method = method;\n\n return deflateReset(strm);\n}\n\nfunction deflateInit(strm, level) {\n return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);\n}\n\n\nfunction deflate(strm, flush) {\n var old_flush, s;\n var beg, val; // for gzip header write only\n\n if (!strm || !strm.state ||\n flush > Z_BLOCK || flush < 0) {\n return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;\n }\n\n s = strm.state;\n\n if (!strm.output ||\n (!strm.input && strm.avail_in !== 0) ||\n (s.status === FINISH_STATE && flush !== Z_FINISH)) {\n return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);\n }\n\n s.strm = strm; /* just in case */\n old_flush = s.last_flush;\n s.last_flush = flush;\n\n /* Write the header */\n if (s.status === INIT_STATE) {\n\n if (s.wrap === 2) { // GZIP header\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n put_byte(s, 31);\n put_byte(s, 139);\n put_byte(s, 8);\n if (!s.gzhead) { // s->gzhead == Z_NULL\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, s.level === 9 ? 2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, OS_CODE);\n s.status = BUSY_STATE;\n }\n else {\n put_byte(s, (s.gzhead.text ? 1 : 0) +\n (s.gzhead.hcrc ? 2 : 0) +\n (!s.gzhead.extra ? 0 : 4) +\n (!s.gzhead.name ? 0 : 8) +\n (!s.gzhead.comment ? 0 : 16)\n );\n put_byte(s, s.gzhead.time & 0xff);\n put_byte(s, (s.gzhead.time >> 8) & 0xff);\n put_byte(s, (s.gzhead.time >> 16) & 0xff);\n put_byte(s, (s.gzhead.time >> 24) & 0xff);\n put_byte(s, s.level === 9 ? 
2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, s.gzhead.os & 0xff);\n if (s.gzhead.extra && s.gzhead.extra.length) {\n put_byte(s, s.gzhead.extra.length & 0xff);\n put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);\n }\n if (s.gzhead.hcrc) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0);\n }\n s.gzindex = 0;\n s.status = EXTRA_STATE;\n }\n }\n else // DEFLATE header\n {\n var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8;\n var level_flags = -1;\n\n if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {\n level_flags = 0;\n } else if (s.level < 6) {\n level_flags = 1;\n } else if (s.level === 6) {\n level_flags = 2;\n } else {\n level_flags = 3;\n }\n header |= (level_flags << 6);\n if (s.strstart !== 0) { header |= PRESET_DICT; }\n header += 31 - (header % 31);\n\n s.status = BUSY_STATE;\n putShortMSB(s, header);\n\n /* Save the adler32 of the preset dictionary: */\n if (s.strstart !== 0) {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n strm.adler = 1; // adler32(0L, Z_NULL, 0);\n }\n }\n\n//#ifdef GZIP\n if (s.status === EXTRA_STATE) {\n if (s.gzhead.extra/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n\n while (s.gzindex < (s.gzhead.extra.length & 0xffff)) {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n break;\n }\n }\n put_byte(s, s.gzhead.extra[s.gzindex] & 0xff);\n s.gzindex++;\n }\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (s.gzindex === s.gzhead.extra.length) {\n s.gzindex = 0;\n s.status = NAME_STATE;\n }\n }\n else {\n s.status = NAME_STATE;\n }\n }\n if (s.status === NAME_STATE) {\n if (s.gzhead.name/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.name.length) {\n val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val === 0) {\n s.gzindex = 0;\n s.status = COMMENT_STATE;\n }\n }\n else {\n s.status = COMMENT_STATE;\n }\n }\n if (s.status === COMMENT_STATE) {\n if (s.gzhead.comment/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.comment.length) {\n val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val 
=== 0) {\n s.status = HCRC_STATE;\n }\n }\n else {\n s.status = HCRC_STATE;\n }\n }\n if (s.status === HCRC_STATE) {\n if (s.gzhead.hcrc) {\n if (s.pending + 2 > s.pending_buf_size) {\n flush_pending(strm);\n }\n if (s.pending + 2 <= s.pending_buf_size) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n s.status = BUSY_STATE;\n }\n }\n else {\n s.status = BUSY_STATE;\n }\n }\n//#endif\n\n /* Flush as much pending output as possible */\n if (s.pending !== 0) {\n flush_pending(strm);\n if (strm.avail_out === 0) {\n /* Since avail_out is 0, deflate will be called again with\n * more output space, but possibly with both pending and\n * avail_in equal to zero. There won't be anything to do,\n * but this is not an error situation so make sure we\n * return OK instead of BUF_ERROR at next call of deflate:\n */\n s.last_flush = -1;\n return Z_OK;\n }\n\n /* Make sure there is something to do and avoid duplicate consecutive\n * flushes. For repeated and useless calls with Z_FINISH, we keep\n * returning Z_STREAM_END instead of Z_BUF_ERROR.\n */\n } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&\n flush !== Z_FINISH) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* User must not provide more input after the first FINISH: */\n if (s.status === FINISH_STATE && strm.avail_in !== 0) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* Start a new block or continue the current one.\n */\n if (strm.avail_in !== 0 || s.lookahead !== 0 ||\n (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) {\n var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) :\n (s.strategy === Z_RLE ? deflate_rle(s, flush) :\n configuration_table[s.level].func(s, flush));\n\n if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {\n s.status = FINISH_STATE;\n }\n if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {\n if (strm.avail_out === 0) {\n s.last_flush = -1;\n /* avoid BUF_ERROR next call, see above */\n }\n return Z_OK;\n /* If flush != Z_NO_FLUSH && avail_out == 0, the next call\n * of deflate should use the same flush parameter to make sure\n * that the flush is complete. So we don't have to output an\n * empty block here, this will be done at next call. 
This also\n * ensures that for a very small output buffer, we emit at most\n * one empty block.\n */\n }\n if (bstate === BS_BLOCK_DONE) {\n if (flush === Z_PARTIAL_FLUSH) {\n trees._tr_align(s);\n }\n else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */\n\n trees._tr_stored_block(s, 0, 0, false);\n /* For a full flush, this empty block will be recognized\n * as a special marker by inflate_sync().\n */\n if (flush === Z_FULL_FLUSH) {\n /*** CLEAR_HASH(s); ***/ /* forget history */\n zero(s.head); // Fill with NIL (= 0);\n\n if (s.lookahead === 0) {\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n }\n }\n flush_pending(strm);\n if (strm.avail_out === 0) {\n s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */\n return Z_OK;\n }\n }\n }\n //Assert(strm->avail_out > 0, \"bug2\");\n //if (strm.avail_out <= 0) { throw new Error(\"bug2\");}\n\n if (flush !== Z_FINISH) { return Z_OK; }\n if (s.wrap <= 0) { return Z_STREAM_END; }\n\n /* Write the trailer */\n if (s.wrap === 2) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n put_byte(s, (strm.adler >> 16) & 0xff);\n put_byte(s, (strm.adler >> 24) & 0xff);\n put_byte(s, strm.total_in & 0xff);\n put_byte(s, (strm.total_in >> 8) & 0xff);\n put_byte(s, (strm.total_in >> 16) & 0xff);\n put_byte(s, (strm.total_in >> 24) & 0xff);\n }\n else\n {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n\n flush_pending(strm);\n /* If avail_out is zero, the application will call deflate again\n * to flush the rest.\n */\n if (s.wrap > 0) { s.wrap = -s.wrap; }\n /* write the trailer only once! */\n return s.pending !== 0 ? Z_OK : Z_STREAM_END;\n}\n\nfunction deflateEnd(strm) {\n var status;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n status = strm.state.status;\n if (status !== INIT_STATE &&\n status !== EXTRA_STATE &&\n status !== NAME_STATE &&\n status !== COMMENT_STATE &&\n status !== HCRC_STATE &&\n status !== BUSY_STATE &&\n status !== FINISH_STATE\n ) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.state = null;\n\n return status === BUSY_STATE ? 
err(strm, Z_DATA_ERROR) : Z_OK;\n}\n\n\n/* =========================================================================\n * Initializes the compression dictionary from the given byte\n * sequence without producing any compressed output.\n */\nfunction deflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var s;\n var str, n;\n var wrap;\n var avail;\n var next;\n var input;\n var tmpDict;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n s = strm.state;\n wrap = s.wrap;\n\n if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {\n return Z_STREAM_ERROR;\n }\n\n /* when using zlib wrappers, compute Adler-32 for provided dictionary */\n if (wrap === 1) {\n /* adler32(strm->adler, dictionary, dictLength); */\n strm.adler = adler32(strm.adler, dictionary, dictLength, 0);\n }\n\n s.wrap = 0; /* avoid computing Adler-32 in read_buf */\n\n /* if dictionary would fill window, just replace the history */\n if (dictLength >= s.w_size) {\n if (wrap === 0) { /* already empty otherwise */\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n /* use the tail */\n // dictionary = dictionary.slice(dictLength - s.w_size);\n tmpDict = new utils.Buf8(s.w_size);\n utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0);\n dictionary = tmpDict;\n dictLength = s.w_size;\n }\n /* insert dictionary into window and hash */\n avail = strm.avail_in;\n next = strm.next_in;\n input = strm.input;\n strm.avail_in = dictLength;\n strm.next_in = 0;\n strm.input = dictionary;\n fill_window(s);\n while (s.lookahead >= MIN_MATCH) {\n str = s.strstart;\n n = s.lookahead - (MIN_MATCH - 1);\n do {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n\n s.head[s.ins_h] = str;\n str++;\n } while (--n);\n s.strstart = str;\n s.lookahead = MIN_MATCH - 1;\n fill_window(s);\n }\n s.strstart += s.lookahead;\n s.block_start = s.strstart;\n s.insert = s.lookahead;\n s.lookahead = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n strm.next_in = next;\n strm.input = input;\n strm.avail_in = avail;\n s.wrap = wrap;\n return Z_OK;\n}\n\n\nexports.deflateInit = deflateInit;\nexports.deflateInit2 = deflateInit2;\nexports.deflateReset = deflateReset;\nexports.deflateResetKeep = deflateResetKeep;\nexports.deflateSetHeader = deflateSetHeader;\nexports.deflate = deflate;\nexports.deflateEnd = deflateEnd;\nexports.deflateSetDictionary = deflateSetDictionary;\nexports.deflateInfo = 'pako deflate (from Nodeca project)';\n\n/* Not implemented\nexports.deflateBound = deflateBound;\nexports.deflateCopy = deflateCopy;\nexports.deflateParams = deflateParams;\nexports.deflatePending = deflatePending;\nexports.deflatePrime = deflatePrime;\nexports.deflateTune = deflateTune;\n*/\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. 
The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nmodule.exports = {\n 2: 'need dictionary', /* Z_NEED_DICT 2 */\n 1: 'stream end', /* Z_STREAM_END 1 */\n 0: '', /* Z_OK 0 */\n '-1': 'file error', /* Z_ERRNO (-1) */\n '-2': 'stream error', /* Z_STREAM_ERROR (-2) */\n '-3': 'data error', /* Z_DATA_ERROR (-3) */\n '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */\n '-5': 'buffer error', /* Z_BUF_ERROR (-5) */\n '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */\n};\n","// String encode/decode helpers\n'use strict';\n\n\nvar utils = require('./common');\n\n\n// Quick check if we can use fast array to bin string conversion\n//\n// - apply(Array) can fail on Android 2.2\n// - apply(Uint8Array) can fail on iOS 5.1 Safari\n//\nvar STR_APPLY_OK = true;\nvar STR_APPLY_UIA_OK = true;\n\ntry { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; }\ntry { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }\n\n\n// Table with utf8 lengths (calculated by first byte of sequence)\n// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,\n// because max possible codepoint is 0x10ffff\nvar _utf8len = new utils.Buf8(256);\nfor (var q = 0; q < 256; q++) {\n _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);\n}\n_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start\n\n\n// convert string to array (typed, when possible)\nexports.string2buf = function (str) {\n var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;\n\n // count binary size\n for (m_pos = 0; m_pos < str_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
3 : 4;\n }\n\n // allocate buffer\n buf = new utils.Buf8(buf_len);\n\n // convert\n for (i = 0, m_pos = 0; i < buf_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n if (c < 0x80) {\n /* one byte */\n buf[i++] = c;\n } else if (c < 0x800) {\n /* two bytes */\n buf[i++] = 0xC0 | (c >>> 6);\n buf[i++] = 0x80 | (c & 0x3f);\n } else if (c < 0x10000) {\n /* three bytes */\n buf[i++] = 0xE0 | (c >>> 12);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n } else {\n /* four bytes */\n buf[i++] = 0xf0 | (c >>> 18);\n buf[i++] = 0x80 | (c >>> 12 & 0x3f);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n }\n }\n\n return buf;\n};\n\n// Helper (used in 2 places)\nfunction buf2binstring(buf, len) {\n // On Chrome, the arguments in a function call that are allowed is `65534`.\n // If the length of the buffer is smaller than that, we can use this optimization,\n // otherwise we will take a slower path.\n if (len < 65534) {\n if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) {\n return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));\n }\n }\n\n var result = '';\n for (var i = 0; i < len; i++) {\n result += String.fromCharCode(buf[i]);\n }\n return result;\n}\n\n\n// Convert byte array to binary string\nexports.buf2binstring = function (buf) {\n return buf2binstring(buf, buf.length);\n};\n\n\n// Convert binary string (typed, when possible)\nexports.binstring2buf = function (str) {\n var buf = new utils.Buf8(str.length);\n for (var i = 0, len = buf.length; i < len; i++) {\n buf[i] = str.charCodeAt(i);\n }\n return buf;\n};\n\n\n// convert array to string\nexports.buf2string = function (buf, max) {\n var i, out, c, c_len;\n var len = max || buf.length;\n\n // Reserve max possible length (2 words per char)\n // NB: by unknown reasons, Array is significantly faster for\n // String.fromCharCode.apply than Uint16Array.\n var utf16buf = new Array(len * 2);\n\n for (out = 0, i = 0; i < len;) {\n c = buf[i++];\n // quick process ascii\n if (c < 0x80) { utf16buf[out++] = c; continue; }\n\n c_len = _utf8len[c];\n // skip 5 & 6 byte codes\n if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }\n\n // apply mask on first byte\n c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;\n // join the rest\n while (c_len > 1 && i < len) {\n c = (c << 6) | (buf[i++] & 0x3f);\n c_len--;\n }\n\n // terminated by end of string?\n if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }\n\n if (c < 0x10000) {\n utf16buf[out++] = c;\n } else {\n c -= 0x10000;\n utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);\n utf16buf[out++] = 0xdc00 | (c & 0x3ff);\n }\n }\n\n return buf2binstring(utf16buf, out);\n};\n\n\n// Calculate max possible position in utf8 buffer,\n// that will not break sequence. 
If that's not possible\n// - (very small limits) return max size as is.\n//\n// buf[] - utf8 bytes array\n// max - length limit (mandatory);\nexports.utf8border = function (buf, max) {\n var pos;\n\n max = max || buf.length;\n if (max > buf.length) { max = buf.length; }\n\n // go back from last position, until start of sequence found\n pos = max - 1;\n while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }\n\n // Very small and broken sequence,\n // return max, because we should return something anyway.\n if (pos < 0) { return max; }\n\n // If we came to start of buffer - that means buffer is too small,\n // return max too.\n if (pos === 0) { return max; }\n\n return (pos + _utf8len[buf[pos]] > max) ? pos : max;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nfunction ZStream() {\n /* next input byte */\n this.input = null; // JS specific, because we have no pointers\n this.next_in = 0;\n /* number of bytes available at input */\n this.avail_in = 0;\n /* total number of input bytes read so far */\n this.total_in = 0;\n /* next output byte should be put there */\n this.output = null; // JS specific, because we have no pointers\n this.next_out = 0;\n /* remaining free space at output */\n this.avail_out = 0;\n /* total number of bytes output so far */\n this.total_out = 0;\n /* last error message, NULL if no error */\n this.msg = ''/*Z_NULL*/;\n /* not visible by applications */\n this.state = null;\n /* best guess about the data type: binary or text */\n this.data_type = 2/*Z_UNKNOWN*/;\n /* adler32 value of the uncompressed data */\n this.adler = 0;\n}\n\nmodule.exports = ZStream;\n","'use strict';\n\n\nvar zlib_deflate = require('./zlib/deflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\n\nvar toString = Object.prototype.toString;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nvar Z_NO_FLUSH = 0;\nvar Z_FINISH = 4;\n\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_SYNC_FLUSH = 2;\n\nvar Z_DEFAULT_COMPRESSION = -1;\n\nvar Z_DEFAULT_STRATEGY = 0;\n\nvar Z_DEFLATED = 8;\n\n/* ===========================================================================*/\n\n\n/**\n * class Deflate\n *\n * Generic JS-style wrapper for zlib calls. 
If you don't need\n * streaming behaviour - use more simple functions: [[deflate]],\n * [[deflateRaw]] and [[gzip]].\n **/\n\n/* internal\n * Deflate.chunks -> Array\n *\n * Chunks of output data, if [[Deflate#onData]] not overridden.\n **/\n\n/**\n * Deflate.result -> Uint8Array|Array\n *\n * Compressed result, generated by default [[Deflate#onData]]\n * and [[Deflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Deflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Deflate.err -> Number\n *\n * Error code after deflate finished. 0 (Z_OK) on success.\n * You will not need it in real life, because deflate errors\n * are possible only on wrong options or bad `onData` / `onEnd`\n * custom handlers.\n **/\n\n/**\n * Deflate.msg -> String\n *\n * Error message, if [[Deflate.err]] != 0\n **/\n\n\n/**\n * new Deflate(options)\n * - options (Object): zlib deflate options.\n *\n * Creates new deflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `level`\n * - `windowBits`\n * - `memLevel`\n * - `strategy`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw deflate\n * - `gzip` (Boolean) - create gzip wrapper\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n * - `header` (Object) - custom header for gzip\n * - `text` (Boolean) - true if compressed data believed to be text\n * - `time` (Number) - modification time, unix timestamp\n * - `os` (Number) - operation system code\n * - `extra` (Array) - array of bytes with extra data (max 65536)\n * - `name` (String) - file name (binary string)\n * - `comment` (String) - comment (binary string)\n * - `hcrc` (Boolean) - true if header crc should be added\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var deflate = new pako.Deflate({ level: 3});\n *\n * deflate.push(chunk1, false);\n * deflate.push(chunk2, true); // true -> last chunk\n *\n * if (deflate.err) { throw new Error(deflate.err); }\n *\n * console.log(deflate.result);\n * ```\n **/\nfunction Deflate(options) {\n if (!(this instanceof Deflate)) return new Deflate(options);\n\n this.options = utils.assign({\n level: Z_DEFAULT_COMPRESSION,\n method: Z_DEFLATED,\n chunkSize: 16384,\n windowBits: 15,\n memLevel: 8,\n strategy: Z_DEFAULT_STRATEGY,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n if (opt.raw && (opt.windowBits > 0)) {\n opt.windowBits = -opt.windowBits;\n }\n\n else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {\n opt.windowBits += 16;\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_deflate.deflateInit2(\n this.strm,\n opt.level,\n opt.method,\n opt.windowBits,\n opt.memLevel,\n opt.strategy\n );\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n if (opt.header) {\n 
zlib_deflate.deflateSetHeader(this.strm, opt.header);\n }\n\n if (opt.dictionary) {\n var dict;\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n // If we need to compress text, change encoding to utf8.\n dict = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(opt.dictionary);\n } else {\n dict = opt.dictionary;\n }\n\n status = zlib_deflate.deflateSetDictionary(this.strm, dict);\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n this._dict_set = true;\n }\n}\n\n/**\n * Deflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be\n * converted to utf8 byte sequence.\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.\n *\n * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with\n * new compressed chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the compression context.\n *\n * On fail call [[Deflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * array format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nDeflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var status, _mode;\n\n if (this.ended) { return false; }\n\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? 
Z_FINISH : Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // If we need to compress text, change encoding to utf8.\n strm.input = strings.string2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n status = zlib_deflate.deflate(strm, _mode); /* no bad return value */\n\n if (status !== Z_STREAM_END && status !== Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) {\n if (this.options.to === 'string') {\n this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out)));\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END);\n\n // Finalize on the last chunk.\n if (_mode === Z_FINISH) {\n status = zlib_deflate.deflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === Z_SYNC_FLUSH) {\n this.onEnd(Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Deflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): output data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nDeflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Deflate#onEnd(status) -> Void\n * - status (Number): deflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called once after you tell deflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. 
By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nDeflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === Z_OK) {\n if (this.options.to === 'string') {\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * deflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * Compress `data` with deflate algorithm and `options`.\n *\n * Supported options are:\n *\n * - level\n * - windowBits\n * - memLevel\n * - strategy\n * - dictionary\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , data = Uint8Array([1,2,3,4,5,6,7,8,9]);\n *\n * console.log(pako.deflate(data));\n * ```\n **/\nfunction deflate(input, options) {\n var deflator = new Deflate(options);\n\n deflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (deflator.err) { throw deflator.msg || msg[deflator.err]; }\n\n return deflator.result;\n}\n\n\n/**\n * deflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction deflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return deflate(input, options);\n}\n\n\n/**\n * gzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but create gzip wrapper instead of\n * deflate one.\n **/\nfunction gzip(input, options) {\n options = options || {};\n options.gzip = true;\n return deflate(input, options);\n}\n\n\nexports.Deflate = Deflate;\nexports.deflate = deflate;\nexports.deflateRaw = deflateRaw;\nexports.gzip = gzip;\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\n// See state defs from inflate.js\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\n\n/*\n Decode literal, length, and distance codes and write out the resulting\n literal and match bytes until either not enough input or output is\n available, an end-of-block is encountered, or a data error is encountered.\n When large enough input and output buffers are supplied to inflate(), for\n example, a 16K input buffer and a 64K output buffer, more than 95% of the\n inflate execution time is spent in this routine.\n\n Entry assumptions:\n\n state.mode === LEN\n strm.avail_in >= 6\n strm.avail_out >= 258\n start >= strm.avail_out\n state.bits < 8\n\n On return, state.mode is one of:\n\n LEN -- ran out of enough output space or enough available input\n TYPE -- reached end of block code, inflate() to interpret next block\n BAD -- error in block data\n\n Notes:\n\n - The maximum input bits used by a length/distance pair is 15 bits for the\n length code, 5 bits for the length extra, 15 bits for the distance code,\n and 13 bits for the distance extra. This totals 48 bits, or six bytes.\n Therefore if strm.avail_in >= 6, then there is enough input to avoid\n checking for available input while decoding.\n\n - The maximum bytes that a single length/distance pair can output is 258\n bytes, which is the maximum length that can be coded. inflate_fast()\n requires strm.avail_out >= 258 for each loop to avoid checking for\n output space.\n */\nmodule.exports = function inflate_fast(strm, start) {\n var state;\n var _in; /* local strm.input */\n var last; /* have enough input while in < last */\n var _out; /* local strm.output */\n var beg; /* inflate()'s initial strm.output */\n var end; /* while out < end, enough space available */\n//#ifdef INFLATE_STRICT\n var dmax; /* maximum distance from zlib header */\n//#endif\n var wsize; /* window size or zero if not using window */\n var whave; /* valid bytes in the window */\n var wnext; /* window write index */\n // Use `s_window` instead `window`, avoid conflict with instrumentation tools\n var s_window; /* allocated sliding window, if wsize != 0 */\n var hold; /* local strm.hold */\n var bits; /* local strm.bits */\n var lcode; /* local strm.lencode */\n var dcode; /* local strm.distcode */\n var lmask; /* mask for first level of length codes */\n var dmask; /* mask for first level of distance codes */\n var here; /* retrieved table entry */\n var op; /* code bits, operation, extra bits, or */\n /* window position, window bytes to copy */\n var len; /* match length, unused bytes */\n var dist; /* match distance */\n var from; /* where to copy match from */\n var from_source;\n\n\n var input, output; // JS specific, because we have no pointers\n\n /* copy state to local variables */\n state = strm.state;\n //here = state.here;\n _in = strm.next_in;\n input = strm.input;\n last = _in + (strm.avail_in - 5);\n _out = strm.next_out;\n output = strm.output;\n beg = _out - (start - strm.avail_out);\n end = _out + (strm.avail_out - 257);\n//#ifdef INFLATE_STRICT\n dmax = state.dmax;\n//#endif\n wsize = state.wsize;\n whave = state.whave;\n wnext = state.wnext;\n s_window = state.window;\n hold = state.hold;\n bits = state.bits;\n lcode = state.lencode;\n dcode = state.distcode;\n lmask = (1 << state.lenbits) - 1;\n dmask = (1 << state.distbits) - 1;\n\n\n /* decode literals and length/distances until 
end-of-block or not enough\n input data or output space */\n\n top:\n do {\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n\n here = lcode[hold & lmask];\n\n dolen:\n for (;;) { // Goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n if (op === 0) { /* literal */\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n output[_out++] = here & 0xffff/*here.val*/;\n }\n else if (op & 16) { /* length base */\n len = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (op) {\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n len += hold & ((1 << op) - 1);\n hold >>>= op;\n bits -= op;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", len));\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n here = dcode[hold & dmask];\n\n dodist:\n for (;;) { // goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n\n if (op & 16) { /* distance base */\n dist = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n }\n dist += hold & ((1 << op) - 1);\n//#ifdef INFLATE_STRICT\n if (dist > dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n//#endif\n hold >>>= op;\n bits -= op;\n //Tracevv((stderr, \"inflate: distance %u\\n\", dist));\n op = _out - beg; /* max distance in output */\n if (dist > op) { /* see if copy from window */\n op = dist - op; /* distance back in window */\n if (op > whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// if (len <= op - whave) {\n// do {\n// output[_out++] = 0;\n// } while (--len);\n// continue top;\n// }\n// len -= op - whave;\n// do {\n// output[_out++] = 0;\n// } while (--op > whave);\n// if (op === 0) {\n// from = _out - dist;\n// do {\n// output[_out++] = output[from++];\n// } while (--len);\n// continue top;\n// }\n//#endif\n }\n from = 0; // window index\n from_source = s_window;\n if (wnext === 0) { /* very common case */\n from += wsize - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n else if (wnext < op) { /* wrap around window */\n from += wsize + wnext - op;\n op -= wnext;\n if (op < len) { /* some from end of window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = 0;\n if (wnext < len) { /* some from start of window */\n op = wnext;\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n }\n else { /* contiguous in window */\n from += wnext - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n while (len > 2) {\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n len -= 3;\n }\n if (len) {\n output[_out++] = from_source[from++];\n if (len > 1) {\n output[_out++] = from_source[from++];\n }\n }\n }\n else {\n from = _out - dist; /* copy direct from output */\n do { /* minimum length is three */\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n len -= 3;\n } while (len > 2);\n if (len) {\n output[_out++] = output[from++];\n if (len > 1) {\n output[_out++] = output[from++];\n }\n }\n }\n }\n else if ((op & 64) === 0) { /* 2nd level distance code */\n here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dodist;\n }\n else {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n }\n else if ((op & 64) === 0) { /* 2nd level length code */\n here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dolen;\n }\n else if (op & 32) { /* end-of-block */\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.mode = TYPE;\n break top;\n }\n else {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n } while (_in < last && _out < end);\n\n /* return unused bytes (on entry, bits < 8, so in won't go too far back) */\n len = bits >> 3;\n _in -= len;\n bits -= len << 3;\n hold &= (1 << bits) - 1;\n\n /* update state and return */\n strm.next_in = _in;\n strm.next_out = _out;\n strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));\n strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));\n state.hold = hold;\n state.bits = bits;\n return;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. 
In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\n\nvar MAXBITS = 15;\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\nvar lbase = [ /* Length codes 257..285 base */\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,\n 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0\n];\n\nvar lext = [ /* Length codes 257..285 extra */\n 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,\n 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78\n];\n\nvar dbase = [ /* Distance codes 0..29 base */\n 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,\n 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,\n 8193, 12289, 16385, 24577, 0, 0\n];\n\nvar dext = [ /* Distance codes 0..29 extra */\n 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,\n 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,\n 28, 28, 29, 29, 64, 64\n];\n\nmodule.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts)\n{\n var bits = opts.bits;\n //here = opts.here; /* table entry for duplication */\n\n var len = 0; /* a code's length in bits */\n var sym = 0; /* index of code symbols */\n var min = 0, max = 0; /* minimum and maximum code lengths */\n var root = 0; /* number of index bits for root table */\n var curr = 0; /* number of index bits for current table */\n var drop = 0; /* code bits to drop for sub-table */\n var left = 0; /* number of prefix codes available */\n var used = 0; /* code entries in table used */\n var huff = 0; /* Huffman code */\n var incr; /* for incrementing code, index */\n var fill; /* index for replicating entries */\n var low; /* low bits for current root entry */\n var mask; /* mask for low root bits */\n var next; /* next available space in table */\n var base = null; /* base value table to use */\n var base_index = 0;\n// var shoextra; /* extra bits table to use */\n var end; /* use base and extra for symbol > end */\n var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */\n var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */\n var extra = null;\n var extra_index = 0;\n\n var here_bits, here_op, here_val;\n\n /*\n Process a set of code lengths to create a canonical Huffman code. The\n code lengths are lens[0..codes-1]. Each length corresponds to the\n symbols 0..codes-1. The Huffman code is generated by first sorting the\n symbols by length from short to long, and retaining the symbol order\n for codes with equal lengths. 
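(Editorial aside, not part of pako: the canonical assignment described above can be sketched directly from the per-length counts. `firstCode` is a hypothetical name; `count` and `MAXBITS` match the variables used below, and zero-length entries are assumed to contribute nothing since unused symbols get no code.)

function firstCode(count, MAXBITS) {
  var code = 0;
  var first = new Array(MAXBITS + 1);
  for (var len = 1; len <= MAXBITS; len++) {
    // codes of the previous length, shifted left one bit, give the first code of this length
    // (count[0], i.e. symbols that do not occur, is treated as zero here)
    code = (code + (len > 1 ? count[len - 1] : 0)) << 1;
    first[len] = code;
  }
  return first; // codes of equal length are consecutive: first[len], first[len] + 1, ...
}

This gives the natural integer ordering; as noted next, deflate stores the bits reversed, which is why the build loop below increments the code "backwards".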
Then the code starts with all zero bits\n for the first code of the shortest length, and the codes are integer\n increments for the same length, and zeros are appended as the length\n increases. For the deflate format, these bits are stored backwards\n from their more natural integer increment ordering, and so when the\n decoding tables are built in the large loop below, the integer codes\n are incremented backwards.\n\n This routine assumes, but does not check, that all of the entries in\n lens[] are in the range 0..MAXBITS. The caller must assure this.\n 1..MAXBITS is interpreted as that code length. zero means that that\n symbol does not occur in this code.\n\n The codes are sorted by computing a count of codes for each length,\n creating from that a table of starting indices for each length in the\n sorted table, and then entering the symbols in order in the sorted\n table. The sorted table is work[], with that space being provided by\n the caller.\n\n The length counts are used for other purposes as well, i.e. finding\n the minimum and maximum length codes, determining if there are any\n codes at all, checking for a valid set of lengths, and looking ahead\n at length counts to determine sub-table sizes when building the\n decoding tables.\n */\n\n /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */\n for (len = 0; len <= MAXBITS; len++) {\n count[len] = 0;\n }\n for (sym = 0; sym < codes; sym++) {\n count[lens[lens_index + sym]]++;\n }\n\n /* bound code lengths, force root to be within code lengths */\n root = bits;\n for (max = MAXBITS; max >= 1; max--) {\n if (count[max] !== 0) { break; }\n }\n if (root > max) {\n root = max;\n }\n if (max === 0) { /* no symbols to code at all */\n //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */\n //table.bits[opts.table_index] = 1; //here.bits = (var char)1;\n //table.val[opts.table_index++] = 0; //here.val = (var short)0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n\n //table.op[opts.table_index] = 64;\n //table.bits[opts.table_index] = 1;\n //table.val[opts.table_index++] = 0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n opts.bits = 1;\n return 0; /* no symbols, but wait for decoding to report error */\n }\n for (min = 1; min < max; min++) {\n if (count[min] !== 0) { break; }\n }\n if (root < min) {\n root = min;\n }\n\n /* check for an over-subscribed or incomplete set of lengths */\n left = 1;\n for (len = 1; len <= MAXBITS; len++) {\n left <<= 1;\n left -= count[len];\n if (left < 0) {\n return -1;\n } /* over-subscribed */\n }\n if (left > 0 && (type === CODES || max !== 1)) {\n return -1; /* incomplete set */\n }\n\n /* generate offsets into symbol table for each length for sorting */\n offs[1] = 0;\n for (len = 1; len < MAXBITS; len++) {\n offs[len + 1] = offs[len] + count[len];\n }\n\n /* sort symbols by length, by symbol order within each length */\n for (sym = 0; sym < codes; sym++) {\n if (lens[lens_index + sym] !== 0) {\n work[offs[lens[lens_index + sym]]++] = sym;\n }\n }\n\n /*\n Create and fill in decoding tables. In this loop, the table being\n filled is at next and has curr index bits. The code being used is huff\n with length len. That code is converted to an index by dropping drop\n bits off of the bottom. For codes where len is less than drop + curr,\n those top drop + curr - len bits are incremented through all values to\n fill the table with replicated entries.\n\n root is the number of index bits for the root table. 
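(Editorial aside, not from the original source: the 32-bit entry format that this loop writes, and that inffast.js / inflate.js unpack, looks like this; `entry`, `nbits`, `op`, `val` are illustrative local names.)

// Packed exactly as the assignments below do: (here_bits << 24) | (here_op << 16) | here_val
var entry = (here_bits << 24) | (here_op << 16) | here_val;
var nbits = entry >>> 24;           // code bits consumed by this entry
var op    = (entry >>> 16) & 0xff;  // 0: literal; 1..15: link to sub-table (op = its index bits);
                                    // op & 16: length/distance base, low 4 bits = extra bits to read;
                                    // op & 32: end-of-block; op & 64: invalid code
var val   = entry & 0xffff;         // symbol, base value, or sub-table offset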
When len exceeds\n root, sub-tables are created pointed to by the root entry with an index\n of the low root bits of huff. This is saved in low to check for when a\n new sub-table should be started. drop is zero when the root table is\n being filled, and drop is root when sub-tables are being filled.\n\n When a new sub-table is needed, it is necessary to look ahead in the\n code lengths to determine what size sub-table is needed. The length\n counts are used for this, and so count[] is decremented as codes are\n entered in the tables.\n\n used keeps track of how many table entries have been allocated from the\n provided *table space. It is checked for LENS and DIST tables against\n the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in\n the initial root table size constants. See the comments in inftrees.h\n for more information.\n\n sym increments through all symbols, and the loop terminates when\n all codes of length max, i.e. all codes, have been processed. This\n routine permits incomplete codes, so another loop after this one fills\n in the rest of the decoding tables with invalid code markers.\n */\n\n /* set up for code type */\n // poor man optimization - use if-else instead of switch,\n // to avoid deopts in old v8\n if (type === CODES) {\n base = extra = work; /* dummy value--not used */\n end = 19;\n\n } else if (type === LENS) {\n base = lbase;\n base_index -= 257;\n extra = lext;\n extra_index -= 257;\n end = 256;\n\n } else { /* DISTS */\n base = dbase;\n extra = dext;\n end = -1;\n }\n\n /* initialize opts for loop */\n huff = 0; /* starting code */\n sym = 0; /* starting code symbol */\n len = min; /* starting code length */\n next = table_index; /* current table to fill in */\n curr = root; /* current table index bits */\n drop = 0; /* current bits to drop from code for index */\n low = -1; /* trigger new sub-table when len > root */\n used = 1 << root; /* use root table entries */\n mask = used - 1; /* mask for comparing low */\n\n /* check available table space */\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* process all codes and make table entries */\n for (;;) {\n /* create table entry */\n here_bits = len - drop;\n if (work[sym] < end) {\n here_op = 0;\n here_val = work[sym];\n }\n else if (work[sym] > end) {\n here_op = extra[extra_index + work[sym]];\n here_val = base[base_index + work[sym]];\n }\n else {\n here_op = 32 + 64; /* end of block */\n here_val = 0;\n }\n\n /* replicate for those indices with low len bits equal to huff */\n incr = 1 << (len - drop);\n fill = 1 << curr;\n min = fill; /* save offset to next table */\n do {\n fill -= incr;\n table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;\n } while (fill !== 0);\n\n /* backwards increment the len-bit code huff */\n incr = 1 << (len - 1);\n while (huff & incr) {\n incr >>= 1;\n }\n if (incr !== 0) {\n huff &= incr - 1;\n huff += incr;\n } else {\n huff = 0;\n }\n\n /* go to next symbol, update count, len */\n sym++;\n if (--count[len] === 0) {\n if (len === max) { break; }\n len = lens[lens_index + work[sym]];\n }\n\n /* create new sub-table if needed */\n if (len > root && (huff & mask) !== low) {\n /* if first time, transition to sub-tables */\n if (drop === 0) {\n drop = root;\n }\n\n /* increment past last table */\n next += min; /* here min is 1 << curr */\n\n /* determine length of next table */\n curr = len - drop;\n left = 1 << curr;\n while (curr + drop < max) {\n left -= 
count[curr + drop];\n if (left <= 0) { break; }\n curr++;\n left <<= 1;\n }\n\n /* check for enough space */\n used += 1 << curr;\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* point entry in root table to sub-table */\n low = huff & mask;\n /*table.op[low] = curr;\n table.bits[low] = root;\n table.val[low] = next - opts.table_index;*/\n table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;\n }\n }\n\n /* fill in remaining table entry if code is incomplete (guaranteed to have\n at most one remaining entry, since if the code is incomplete, the\n maximum code length that was allowed to get this far is one bit) */\n if (huff !== 0) {\n //table.op[next + huff] = 64; /* invalid code marker */\n //table.bits[next + huff] = len - drop;\n //table.val[next + huff] = 0;\n table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;\n }\n\n /* set return parameters */\n //opts.table_index += used;\n opts.bits = root;\n return 0;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar inflate_fast = require('./inffast');\nvar inflate_table = require('./inftrees');\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\n//var Z_NO_FLUSH = 0;\n//var Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\n//var Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\nvar Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. 
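(Editorial note, not in the original comment: how these values are typically read by the wrapper code further down in this bundle.)

// ret === Z_OK (0)         -> not finished; call inflate() again with more input/output space
// ret === Z_STREAM_END (1) -> stream complete and trailer check passed
// ret === Z_NEED_DICT (2)  -> "special but normal": provide a dictionary via
//                             inflateSetDictionary() and call inflate() again
// ret < 0                  -> error; strm.msg holds a human-readable description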
Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\nvar Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n\n/* STATES ====================================================================*/\n/* ===========================================================================*/\n\n\nvar HEAD = 1; /* i: waiting for magic header */\nvar FLAGS = 2; /* i: waiting for method and flags (gzip) */\nvar TIME = 3; /* i: waiting for modification time (gzip) */\nvar OS = 4; /* i: waiting for extra flags and operating system (gzip) */\nvar EXLEN = 5; /* i: waiting for extra length (gzip) */\nvar EXTRA = 6; /* i: waiting for extra bytes (gzip) */\nvar NAME = 7; /* i: waiting for end of file name (gzip) */\nvar COMMENT = 8; /* i: waiting for end of comment (gzip) */\nvar HCRC = 9; /* i: waiting for header crc (gzip) */\nvar DICTID = 10; /* i: waiting for dictionary check value */\nvar DICT = 11; /* waiting for inflateSetDictionary() call */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\nvar TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */\nvar STORED = 14; /* i: waiting for stored size (length and complement) */\nvar COPY_ = 15; /* i/o: same as COPY below, but only first time in */\nvar COPY = 16; /* i/o: waiting for input or output to copy stored block */\nvar TABLE = 17; /* i: waiting for dynamic block table lengths */\nvar LENLENS = 18; /* i: waiting for code length code lengths */\nvar CODELENS = 19; /* i: waiting for length/lit and distance code lengths */\nvar LEN_ = 20; /* i: same as LEN below, but only first time in */\nvar LEN = 21; /* i: waiting for length/lit/eob code */\nvar LENEXT = 22; /* i: waiting for length extra bits */\nvar DIST = 23; /* i: waiting for distance code */\nvar DISTEXT = 24; /* i: waiting for distance extra bits */\nvar MATCH = 25; /* o: waiting for output space to copy string */\nvar LIT = 26; /* o: waiting for output space to write literal */\nvar CHECK = 27; /* i: waiting for 32-bit check value */\nvar LENGTH = 28; /* i: waiting for 32-bit length (gzip) */\nvar DONE = 29; /* finished check, done -- remain here until reset */\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar MEM = 31; /* got an inflate() memory error -- remain here until reset */\nvar SYNC = 32; /* looking for synchronization bytes to restart inflate() */\n\n/* ===========================================================================*/\n\n\n\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_WBITS = MAX_WBITS;\n\n\nfunction zswap32(q) {\n return (((q >>> 24) & 0xff) +\n ((q >>> 8) & 0xff00) +\n ((q & 0xff00) << 8) +\n ((q & 0xff) << 24));\n}\n\n\nfunction InflateState() {\n this.mode = 0; /* current inflate mode */\n this.last = false; /* true if processing last block */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.havedict = false; /* true if dictionary provided */\n this.flags = 0; /* gzip header method and flags (0 if zlib) */\n this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */\n this.check = 0; /* protected copy of check value */\n this.total = 0; /* protected copy of output count */\n // TODO: may be {}\n this.head = null; /* where to save gzip header 
information */\n\n /* sliding window */\n this.wbits = 0; /* log base 2 of requested window size */\n this.wsize = 0; /* window size or zero if not using window */\n this.whave = 0; /* valid bytes in the window */\n this.wnext = 0; /* window write index */\n this.window = null; /* allocated sliding window, if needed */\n\n /* bit accumulator */\n this.hold = 0; /* input bit accumulator */\n this.bits = 0; /* number of bits in \"in\" */\n\n /* for string and stored block copying */\n this.length = 0; /* literal or length of data to copy */\n this.offset = 0; /* distance back to copy string from */\n\n /* for table and code decoding */\n this.extra = 0; /* extra bits needed */\n\n /* fixed and dynamic code tables */\n this.lencode = null; /* starting table for length/literal codes */\n this.distcode = null; /* starting table for distance codes */\n this.lenbits = 0; /* index bits for lencode */\n this.distbits = 0; /* index bits for distcode */\n\n /* dynamic table building */\n this.ncode = 0; /* number of code length code lengths */\n this.nlen = 0; /* number of length code lengths */\n this.ndist = 0; /* number of distance code lengths */\n this.have = 0; /* number of code lengths in lens[] */\n this.next = null; /* next available space in codes[] */\n\n this.lens = new utils.Buf16(320); /* temporary storage for code lengths */\n this.work = new utils.Buf16(288); /* work area for code table building */\n\n /*\n because we don't have pointers in js, we use lencode and distcode directly\n as buffers so we don't need codes\n */\n //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */\n this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */\n this.distdyn = null; /* dynamic table for distance codes (JS specific) */\n this.sane = 0; /* if false, allow invalid distance too far */\n this.back = 0; /* bits back of last unprocessed length/lit */\n this.was = 0; /* initial length of match */\n}\n\nfunction inflateResetKeep(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n strm.total_in = strm.total_out = state.total = 0;\n strm.msg = ''; /*Z_NULL*/\n if (state.wrap) { /* to support ill-conceived Java test suite */\n strm.adler = state.wrap & 1;\n }\n state.mode = HEAD;\n state.last = 0;\n state.havedict = 0;\n state.dmax = 32768;\n state.head = null/*Z_NULL*/;\n state.hold = 0;\n state.bits = 0;\n //state.lencode = state.distcode = state.next = state.codes;\n state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS);\n state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS);\n\n state.sane = 1;\n state.back = -1;\n //Tracev((stderr, \"inflate: reset\\n\"));\n return Z_OK;\n}\n\nfunction inflateReset(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n state.wsize = 0;\n state.whave = 0;\n state.wnext = 0;\n return inflateResetKeep(strm);\n\n}\n\nfunction inflateReset2(strm, windowBits) {\n var wrap;\n var state;\n\n /* get the state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n /* extract wrap request from windowBits parameter */\n if (windowBits < 0) {\n wrap = 0;\n windowBits = -windowBits;\n }\n else {\n wrap = (windowBits >> 4) + 1;\n if (windowBits < 48) {\n windowBits &= 15;\n }\n }\n\n /* set number of window bits, free window if different */\n if (windowBits && (windowBits < 8 || windowBits > 15)) {\n return Z_STREAM_ERROR;\n }\n if (state.window !== null && state.wbits !== windowBits) {\n state.window = 
null;\n }\n\n /* update state and reset the rest of it */\n state.wrap = wrap;\n state.wbits = windowBits;\n return inflateReset(strm);\n}\n\nfunction inflateInit2(strm, windowBits) {\n var ret;\n var state;\n\n if (!strm) { return Z_STREAM_ERROR; }\n //strm.msg = Z_NULL; /* in case we return an error */\n\n state = new InflateState();\n\n //if (state === Z_NULL) return Z_MEM_ERROR;\n //Tracev((stderr, \"inflate: allocated\\n\"));\n strm.state = state;\n state.window = null/*Z_NULL*/;\n ret = inflateReset2(strm, windowBits);\n if (ret !== Z_OK) {\n strm.state = null/*Z_NULL*/;\n }\n return ret;\n}\n\nfunction inflateInit(strm) {\n return inflateInit2(strm, DEF_WBITS);\n}\n\n\n/*\n Return state with length and distance decoding tables and index sizes set to\n fixed code decoding. Normally this returns fixed tables from inffixed.h.\n If BUILDFIXED is defined, then instead this routine builds the tables the\n first time it's called, and returns those tables the first time and\n thereafter. This reduces the size of the code by about 2K bytes, in\n exchange for a little execution time. However, BUILDFIXED should not be\n used for threaded applications, since the rewriting of the tables and virgin\n may not be thread-safe.\n */\nvar virgin = true;\n\nvar lenfix, distfix; // We have no pointers in JS, so keep tables separate\n\nfunction fixedtables(state) {\n /* build fixed huffman tables if first call (may not be thread safe) */\n if (virgin) {\n var sym;\n\n lenfix = new utils.Buf32(512);\n distfix = new utils.Buf32(32);\n\n /* literal/length table */\n sym = 0;\n while (sym < 144) { state.lens[sym++] = 8; }\n while (sym < 256) { state.lens[sym++] = 9; }\n while (sym < 280) { state.lens[sym++] = 7; }\n while (sym < 288) { state.lens[sym++] = 8; }\n\n inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });\n\n /* distance table */\n sym = 0;\n while (sym < 32) { state.lens[sym++] = 5; }\n\n inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });\n\n /* do this just once */\n virgin = false;\n }\n\n state.lencode = lenfix;\n state.lenbits = 9;\n state.distcode = distfix;\n state.distbits = 5;\n}\n\n\n/*\n Update the window with the last wsize (normally 32K) bytes written before\n returning. If window does not exist yet, create it. 
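(Editorial illustration, not in the source: wrap-around behaviour of the circular window for example values; the real copies are the utils.arraySet() calls below.)

var wsize = 32768, wnext = 30000, copy = 5000;   // example numbers only
var dist  = wsize - wnext;                       // 2768 bytes fit before the wrap point
// the first arraySet() writes those 2768 bytes at offset wnext,
// the remaining 5000 - 2768 = 2232 bytes restart at offset 0,
// after which wnext === 2232 and whave === wsize (the window is full)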
This is only called\n when a window is already in use, or when output has been written during this\n inflate call, but the end of the deflate stream has not been reached yet.\n It is also called to create a window for dictionary data when a dictionary\n is loaded.\n\n Providing output buffers larger than 32K to inflate() should provide a speed\n advantage, since only the last 32K of output is copied to the sliding window\n upon return from inflate(), and since all distances after the first 32K of\n output will fall in the output data, making match copies simpler and faster.\n The advantage may be dependent on the size of the processor's data caches.\n */\nfunction updatewindow(strm, src, end, copy) {\n var dist;\n var state = strm.state;\n\n /* if it hasn't been done already, allocate space for the window */\n if (state.window === null) {\n state.wsize = 1 << state.wbits;\n state.wnext = 0;\n state.whave = 0;\n\n state.window = new utils.Buf8(state.wsize);\n }\n\n /* copy state->wsize or less output bytes into the circular window */\n if (copy >= state.wsize) {\n utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0);\n state.wnext = 0;\n state.whave = state.wsize;\n }\n else {\n dist = state.wsize - state.wnext;\n if (dist > copy) {\n dist = copy;\n }\n //zmemcpy(state->window + state->wnext, end - copy, dist);\n utils.arraySet(state.window, src, end - copy, dist, state.wnext);\n copy -= dist;\n if (copy) {\n //zmemcpy(state->window, end - copy, copy);\n utils.arraySet(state.window, src, end - copy, copy, 0);\n state.wnext = copy;\n state.whave = state.wsize;\n }\n else {\n state.wnext += dist;\n if (state.wnext === state.wsize) { state.wnext = 0; }\n if (state.whave < state.wsize) { state.whave += dist; }\n }\n }\n return 0;\n}\n\nfunction inflate(strm, flush) {\n var state;\n var input, output; // input/output buffers\n var next; /* next input INDEX */\n var put; /* next output INDEX */\n var have, left; /* available input and output */\n var hold; /* bit buffer */\n var bits; /* bits in bit buffer */\n var _in, _out; /* save starting available input and output */\n var copy; /* number of stored or match bytes to copy */\n var from; /* where to copy match bytes from */\n var from_source;\n var here = 0; /* current decoding table entry */\n var here_bits, here_op, here_val; // paked \"here\" denormalized (JS specific)\n //var last; /* parent table entry */\n var last_bits, last_op, last_val; // paked \"last\" denormalized (JS specific)\n var len; /* length to copy for repeats, bits to drop */\n var ret; /* return code */\n var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */\n var opts;\n\n var n; // temporary var for NEED_BITS\n\n var order = /* permutation of code lengths */\n [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];\n\n\n if (!strm || !strm.state || !strm.output ||\n (!strm.input && strm.avail_in !== 0)) {\n return Z_STREAM_ERROR;\n }\n\n state = strm.state;\n if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */\n\n\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n _in = have;\n _out = left;\n ret = Z_OK;\n\n inf_leave: // goto emulation\n for (;;) {\n switch (state.mode) {\n case HEAD:\n if (state.wrap === 0) {\n state.mode = TYPEDO;\n break;\n }\n //=== NEEDBITS(16);\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold 
+= input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */\n state.check = 0/*crc32(0L, Z_NULL, 0)*/;\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = FLAGS;\n break;\n }\n state.flags = 0; /* expect zlib header */\n if (state.head) {\n state.head.done = false;\n }\n if (!(state.wrap & 1) || /* check if zlib header allowed */\n (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) {\n strm.msg = 'incorrect header check';\n state.mode = BAD;\n break;\n }\n if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n len = (hold & 0x0f)/*BITS(4)*/ + 8;\n if (state.wbits === 0) {\n state.wbits = len;\n }\n else if (len > state.wbits) {\n strm.msg = 'invalid window size';\n state.mode = BAD;\n break;\n }\n state.dmax = 1 << len;\n //Tracev((stderr, \"inflate: zlib header ok\\n\"));\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = hold & 0x200 ? DICTID : TYPE;\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n break;\n case FLAGS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.flags = hold;\n if ((state.flags & 0xff) !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n if (state.flags & 0xe000) {\n strm.msg = 'unknown header flags set';\n state.mode = BAD;\n break;\n }\n if (state.head) {\n state.head.text = ((hold >> 8) & 1);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = TIME;\n /* falls through */\n case TIME:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.time = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC4(state.check, hold)\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n hbuf[2] = (hold >>> 16) & 0xff;\n hbuf[3] = (hold >>> 24) & 0xff;\n state.check = crc32(state.check, hbuf, 4, 0);\n //===\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = OS;\n /* falls through */\n case OS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.xflags = (hold & 0xff);\n state.head.os = (hold >> 8);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = EXLEN;\n /* falls through */\n case EXLEN:\n if (state.flags & 0x0400) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length = hold;\n if (state.head) {\n state.head.extra_len = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = 
(hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n else if (state.head) {\n state.head.extra = null/*Z_NULL*/;\n }\n state.mode = EXTRA;\n /* falls through */\n case EXTRA:\n if (state.flags & 0x0400) {\n copy = state.length;\n if (copy > have) { copy = have; }\n if (copy) {\n if (state.head) {\n len = state.head.extra_len - state.length;\n if (!state.head.extra) {\n // Use untyped array for more convenient processing later\n state.head.extra = new Array(state.head.extra_len);\n }\n utils.arraySet(\n state.head.extra,\n input,\n next,\n // extra field is limited to 65536 bytes\n // - no need for additional size check\n copy,\n /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/\n len\n );\n //zmemcpy(state.head.extra + len, next,\n // len + copy > state.head.extra_max ?\n // state.head.extra_max - len : copy);\n }\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n state.length -= copy;\n }\n if (state.length) { break inf_leave; }\n }\n state.length = 0;\n state.mode = NAME;\n /* falls through */\n case NAME:\n if (state.flags & 0x0800) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n // TODO: 2 or 1 bytes?\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.name_max*/)) {\n state.head.name += String.fromCharCode(len);\n }\n } while (len && copy < have);\n\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.name = null;\n }\n state.length = 0;\n state.mode = COMMENT;\n /* falls through */\n case COMMENT:\n if (state.flags & 0x1000) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.comm_max*/)) {\n state.head.comment += String.fromCharCode(len);\n }\n } while (len && copy < have);\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.comment = null;\n }\n state.mode = HCRC;\n /* falls through */\n case HCRC:\n if (state.flags & 0x0200) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.check & 0xffff)) {\n strm.msg = 'header crc mismatch';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n if (state.head) {\n state.head.hcrc = ((state.flags >> 9) & 1);\n state.head.done = true;\n }\n strm.adler = state.check = 0;\n state.mode = TYPE;\n break;\n case DICTID:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n strm.adler = state.check = zswap32(hold);\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = DICT;\n /* falls through */\n case DICT:\n if (state.havedict === 0) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n 
return Z_NEED_DICT;\n }\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = TYPE;\n /* falls through */\n case TYPE:\n if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case TYPEDO:\n if (state.last) {\n //--- BYTEBITS() ---//\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n state.mode = CHECK;\n break;\n }\n //=== NEEDBITS(3); */\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.last = (hold & 0x01)/*BITS(1)*/;\n //--- DROPBITS(1) ---//\n hold >>>= 1;\n bits -= 1;\n //---//\n\n switch ((hold & 0x03)/*BITS(2)*/) {\n case 0: /* stored block */\n //Tracev((stderr, \"inflate: stored block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = STORED;\n break;\n case 1: /* fixed block */\n fixedtables(state);\n //Tracev((stderr, \"inflate: fixed codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = LEN_; /* decode codes */\n if (flush === Z_TREES) {\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break inf_leave;\n }\n break;\n case 2: /* dynamic block */\n //Tracev((stderr, \"inflate: dynamic codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = TABLE;\n break;\n case 3:\n strm.msg = 'invalid block type';\n state.mode = BAD;\n }\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break;\n case STORED:\n //--- BYTEBITS() ---// /* go to byte boundary */\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) {\n strm.msg = 'invalid stored block lengths';\n state.mode = BAD;\n break;\n }\n state.length = hold & 0xffff;\n //Tracev((stderr, \"inflate: stored length %u\\n\",\n // state.length));\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = COPY_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case COPY_:\n state.mode = COPY;\n /* falls through */\n case COPY:\n copy = state.length;\n if (copy) {\n if (copy > have) { copy = have; }\n if (copy > left) { copy = left; }\n if (copy === 0) { break inf_leave; }\n //--- zmemcpy(put, next, copy); ---\n utils.arraySet(output, input, next, copy, put);\n //---//\n have -= copy;\n next += copy;\n left -= copy;\n put += copy;\n state.length -= copy;\n break;\n }\n //Tracev((stderr, \"inflate: stored end\\n\"));\n state.mode = TYPE;\n break;\n case TABLE:\n //=== NEEDBITS(14); */\n while (bits < 14) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4;\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n//#ifndef PKZIP_BUG_WORKAROUND\n if (state.nlen > 286 || state.ndist > 30) {\n strm.msg = 'too many length or distance symbols';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracev((stderr, \"inflate: table sizes ok\\n\"));\n state.have = 0;\n state.mode = LENLENS;\n /* falls through */\n case LENLENS:\n while (state.have < state.ncode) {\n //=== NEEDBITS(3);\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << 
bits;\n bits += 8;\n }\n //===//\n state.lens[order[state.have++]] = (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n while (state.have < 19) {\n state.lens[order[state.have++]] = 0;\n }\n // We have separate tables & no pointers. 2 commented lines below not needed.\n //state.next = state.codes;\n //state.lencode = state.next;\n // Switch to use dynamic table\n state.lencode = state.lendyn;\n state.lenbits = 7;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts);\n state.lenbits = opts.bits;\n\n if (ret) {\n strm.msg = 'invalid code lengths set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, \"inflate: code lengths ok\\n\"));\n state.have = 0;\n state.mode = CODELENS;\n /* falls through */\n case CODELENS:\n while (state.have < state.nlen + state.ndist) {\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_val < 16) {\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.lens[state.have++] = here_val;\n }\n else {\n if (here_val === 16) {\n //=== NEEDBITS(here.bits + 2);\n n = here_bits + 2;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n if (state.have === 0) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n len = state.lens[state.have - 1];\n copy = 3 + (hold & 0x03);//BITS(2);\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n }\n else if (here_val === 17) {\n //=== NEEDBITS(here.bits + 3);\n n = here_bits + 3;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 3 + (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n else {\n //=== NEEDBITS(here.bits + 7);\n n = here_bits + 7;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 11 + (hold & 0x7f);//BITS(7);\n //--- DROPBITS(7) ---//\n hold >>>= 7;\n bits -= 7;\n //---//\n }\n if (state.have + copy > state.nlen + state.ndist) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n while (copy--) {\n state.lens[state.have++] = len;\n }\n }\n }\n\n /* handle error breaks in while */\n if (state.mode === BAD) { break; }\n\n /* check for end-of-block code (better have one) */\n if (state.lens[256] === 0) {\n strm.msg = 'invalid code -- missing end-of-block';\n state.mode = BAD;\n break;\n }\n\n /* build code tables -- note: do not change the lenbits or distbits\n values here (9 and 6) without reading the comments in inftrees.h\n concerning the ENOUGH constants, which depend on those values */\n state.lenbits = 9;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, 
state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.lenbits = opts.bits;\n // state.lencode = state.next;\n\n if (ret) {\n strm.msg = 'invalid literal/lengths set';\n state.mode = BAD;\n break;\n }\n\n state.distbits = 6;\n //state.distcode.copy(state.codes);\n // Switch to use dynamic table\n state.distcode = state.distdyn;\n opts = { bits: state.distbits };\n ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.distbits = opts.bits;\n // state.distcode = state.next;\n\n if (ret) {\n strm.msg = 'invalid distances set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, 'inflate: codes ok\\n'));\n state.mode = LEN_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case LEN_:\n state.mode = LEN;\n /* falls through */\n case LEN:\n if (have >= 6 && left >= 258) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n inflate_fast(strm, _out);\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n if (state.mode === TYPE) {\n state.back = -1;\n }\n break;\n }\n state.back = 0;\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if (here_bits <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_op && (here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.lencode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n state.length = here_val;\n if (here_op === 0) {\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n state.mode = LIT;\n break;\n }\n if (here_op & 32) {\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.back = -1;\n state.mode = TYPE;\n break;\n }\n if (here_op & 64) {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break;\n }\n state.extra = here_op & 15;\n state.mode = LENEXT;\n /* falls through */\n case LENEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= 
state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", state.length));\n state.was = state.length;\n state.mode = DIST;\n /* falls through */\n case DIST:\n for (;;) {\n here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if ((here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.distcode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n if (here_op & 64) {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break;\n }\n state.offset = here_val;\n state.extra = (here_op) & 15;\n state.mode = DISTEXT;\n /* falls through */\n case DISTEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n//#ifdef INFLATE_STRICT\n if (state.offset > state.dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracevv((stderr, \"inflate: distance %u\\n\", state.offset));\n state.mode = MATCH;\n /* falls through */\n case MATCH:\n if (left === 0) { break inf_leave; }\n copy = _out - left;\n if (state.offset > copy) { /* copy from window */\n copy = state.offset - copy;\n if (copy > state.whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// Trace((stderr, \"inflate.c too far\\n\"));\n// copy -= state.whave;\n// if (copy > state.length) { copy = state.length; }\n// if (copy > left) { copy = left; }\n// left -= copy;\n// state.length -= copy;\n// do {\n// output[put++] = 0;\n// } while (--copy);\n// if (state.length === 0) { state.mode = LEN; }\n// break;\n//#endif\n }\n if (copy > state.wnext) {\n copy -= state.wnext;\n from = state.wsize - copy;\n }\n else {\n from = state.wnext - copy;\n }\n if (copy > state.length) { copy = state.length; }\n from_source = state.window;\n }\n else { /* copy from output */\n from_source = output;\n from = put - state.offset;\n copy = state.length;\n }\n if (copy > left) { copy = left; }\n left -= copy;\n state.length -= copy;\n do {\n output[put++] = from_source[from++];\n } while (--copy);\n if (state.length === 0) { state.mode = LEN; }\n break;\n case LIT:\n if (left === 0) { break inf_leave; }\n output[put++] = state.length;\n left--;\n state.mode = LEN;\n break;\n case CHECK:\n if (state.wrap) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n // Use '|' instead of '+' to make sure that result is signed\n hold |= input[next++] << bits;\n bits += 8;\n }\n //===//\n _out -= left;\n strm.total_out += _out;\n state.total += _out;\n if (_out) {\n strm.adler = state.check =\n /*UPDATE(state.check, put - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out));\n\n }\n _out = left;\n // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too\n if ((state.flags ? hold : zswap32(hold)) !== state.check) {\n strm.msg = 'incorrect data check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: check matches trailer\\n\"));\n }\n state.mode = LENGTH;\n /* falls through */\n case LENGTH:\n if (state.wrap && state.flags) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.total & 0xffffffff)) {\n strm.msg = 'incorrect length check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: length matches trailer\\n\"));\n }\n state.mode = DONE;\n /* falls through */\n case DONE:\n ret = Z_STREAM_END;\n break inf_leave;\n case BAD:\n ret = Z_DATA_ERROR;\n break inf_leave;\n case MEM:\n return Z_MEM_ERROR;\n case SYNC:\n /* falls through */\n default:\n return Z_STREAM_ERROR;\n }\n }\n\n // inf_leave <- here is real place for \"goto inf_leave\", emulated via \"break inf_leave\"\n\n /*\n Return from inflate(), updating the total counts and the check value.\n If there was no progress during the inflate() call, return a buffer\n error. 
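(Editorial sketch, not part of this file: what the return convention means for a caller driving inflate() directly. refillInput() and provideOutput() are hypothetical helpers, and Z_NO_FLUSH (0) comes from constants.js since it is commented out above.)

var ret = Z_OK;
while (ret !== Z_STREAM_END) {
  if (strm.avail_in === 0)  { refillInput(strm); }    // hypothetical: supply more compressed bytes
  if (strm.avail_out === 0) { provideOutput(strm); }  // hypothetical: supply a fresh output buffer
  ret = inflate(strm, 0); // Z_NO_FLUSH
  if (ret === Z_BUF_ERROR) { break; }                 // no progress was possible; refill before retrying
  if (ret === Z_NEED_DICT || ret < 0) { break; }      // needs a dictionary, or a real error
}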
Call updatewindow() to create and/or update the window state.\n Note: a memory error from inflate() is non-recoverable.\n */\n\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n\n if (state.wsize || (_out !== strm.avail_out && state.mode < BAD &&\n (state.mode < CHECK || flush !== Z_FINISH))) {\n if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n }\n _in -= strm.avail_in;\n _out -= strm.avail_out;\n strm.total_in += _in;\n strm.total_out += _out;\n state.total += _out;\n if (state.wrap && _out) {\n strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out));\n }\n strm.data_type = state.bits + (state.last ? 64 : 0) +\n (state.mode === TYPE ? 128 : 0) +\n (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0);\n if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) {\n ret = Z_BUF_ERROR;\n }\n return ret;\n}\n\nfunction inflateEnd(strm) {\n\n if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) {\n return Z_STREAM_ERROR;\n }\n\n var state = strm.state;\n if (state.window) {\n state.window = null;\n }\n strm.state = null;\n return Z_OK;\n}\n\nfunction inflateGetHeader(strm, head) {\n var state;\n\n /* check state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; }\n\n /* save header structure */\n state.head = head;\n head.done = false;\n return Z_OK;\n}\n\nfunction inflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var state;\n var dictid;\n var ret;\n\n /* check state */\n if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n if (state.wrap !== 0 && state.mode !== DICT) {\n return Z_STREAM_ERROR;\n }\n\n /* check for correct dictionary identifier */\n if (state.mode === DICT) {\n dictid = 1; /* adler32(0, null, 0)*/\n /* dictid = adler32(dictid, dictionary, dictLength); */\n dictid = adler32(dictid, dictionary, dictLength, 0);\n if (dictid !== state.check) {\n return Z_DATA_ERROR;\n }\n }\n /* copy dictionary to window using updatewindow(), which will amend the\n existing dictionary if appropriate */\n ret = updatewindow(strm, dictionary, dictLength, dictLength);\n if (ret) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n state.havedict = 1;\n // Tracev((stderr, \"inflate: dictionary set\\n\"));\n return Z_OK;\n}\n\nexports.inflateReset = inflateReset;\nexports.inflateReset2 = inflateReset2;\nexports.inflateResetKeep = inflateResetKeep;\nexports.inflateInit = inflateInit;\nexports.inflateInit2 = inflateInit2;\nexports.inflate = inflate;\nexports.inflateEnd = inflateEnd;\nexports.inflateGetHeader = inflateGetHeader;\nexports.inflateSetDictionary = inflateSetDictionary;\nexports.inflateInfo = 'pako inflate (from Nodeca project)';\n\n/* Not implemented\nexports.inflateCopy = inflateCopy;\nexports.inflateGetDictionary = inflateGetDictionary;\nexports.inflateMark = inflateMark;\nexports.inflatePrime = inflatePrime;\nexports.inflateSync = inflateSync;\nexports.inflateSyncPoint = inflateSyncPoint;\nexports.inflateUndermine = inflateUndermine;\n*/\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey 
Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nmodule.exports = {\n\n /* Allowed flush values; see deflate() and inflate() below for details */\n Z_NO_FLUSH: 0,\n Z_PARTIAL_FLUSH: 1,\n Z_SYNC_FLUSH: 2,\n Z_FULL_FLUSH: 3,\n Z_FINISH: 4,\n Z_BLOCK: 5,\n Z_TREES: 6,\n\n /* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\n Z_OK: 0,\n Z_STREAM_END: 1,\n Z_NEED_DICT: 2,\n Z_ERRNO: -1,\n Z_STREAM_ERROR: -2,\n Z_DATA_ERROR: -3,\n //Z_MEM_ERROR: -4,\n Z_BUF_ERROR: -5,\n //Z_VERSION_ERROR: -6,\n\n /* compression levels */\n Z_NO_COMPRESSION: 0,\n Z_BEST_SPEED: 1,\n Z_BEST_COMPRESSION: 9,\n Z_DEFAULT_COMPRESSION: -1,\n\n\n Z_FILTERED: 1,\n Z_HUFFMAN_ONLY: 2,\n Z_RLE: 3,\n Z_FIXED: 4,\n Z_DEFAULT_STRATEGY: 0,\n\n /* Possible values of the data_type field (though see inflate()) */\n Z_BINARY: 0,\n Z_TEXT: 1,\n //Z_ASCII: 1, // = Z_TEXT (deprecated)\n Z_UNKNOWN: 2,\n\n /* The deflate compression method */\n Z_DEFLATED: 8\n //Z_NULL: null // Use -1 or null inline, depending on var type\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\nfunction GZheader() {\n /* true if compressed data believed to be text */\n this.text = 0;\n /* modification time */\n this.time = 0;\n /* extra flags (not used when writing a gzip file) */\n this.xflags = 0;\n /* operating system */\n this.os = 0;\n /* pointer to extra field or Z_NULL if none */\n this.extra = null;\n /* extra field length (valid if extra != Z_NULL) */\n this.extra_len = 0; // Actually, we don't need it in JS,\n // but leave for few code modifications\n\n //\n // Setup limits is not necessary because in js we should not preallocate memory\n // for inflate use constant limit in 65536 bytes\n //\n\n /* space at extra (only when reading header) */\n // this.extra_max = 0;\n /* pointer to zero-terminated file name or Z_NULL */\n this.name = '';\n /* space at name (only when reading header) */\n // this.name_max = 0;\n /* pointer to zero-terminated comment or Z_NULL */\n this.comment = '';\n /* space at comment (only when reading header) */\n // this.comm_max = 0;\n /* true if there was or will be a header crc */\n this.hcrc = 0;\n /* true when done reading gzip header (not used when writing a gzip file) */\n this.done = false;\n}\n\nmodule.exports = GZheader;\n","'use strict';\n\n\nvar zlib_inflate = require('./zlib/inflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar c = require('./zlib/constants');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\nvar GZheader = require('./zlib/gzheader');\n\nvar toString = Object.prototype.toString;\n\n/**\n * class Inflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[inflate]]\n * and [[inflateRaw]].\n **/\n\n/* internal\n * inflate.chunks -> Array\n *\n * Chunks of output data, if [[Inflate#onData]] not overridden.\n **/\n\n/**\n * Inflate.result -> Uint8Array|Array|String\n *\n * Uncompressed result, generated by default [[Inflate#onData]]\n * and [[Inflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Inflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Inflate.err -> Number\n *\n * Error code after inflate finished. 0 (Z_OK) on success.\n * Should be checked if broken data possible.\n **/\n\n/**\n * Inflate.msg -> String\n *\n * Error message, if [[Inflate.err]] != 0\n **/\n\n\n/**\n * new Inflate(options)\n * - options (Object): zlib inflate options.\n *\n * Creates new inflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `windowBits`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw inflate\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n * By default, when no options set, autodetect deflate/gzip data format via\n * wrapper header.\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var inflate = new pako.Inflate({ level: 3});\n *\n * inflate.push(chunk1, false);\n * inflate.push(chunk2, true); // true -> last chunk\n *\n * if (inflate.err) { throw new Error(inflate.err); }\n *\n * console.log(inflate.result);\n * ```\n **/\nfunction Inflate(options) {\n if (!(this instanceof Inflate)) return new Inflate(options);\n\n this.options = utils.assign({\n chunkSize: 16384,\n windowBits: 0,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n // Force window size for `raw` data, if not set directly,\n // because we have no header for autodetect.\n if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {\n opt.windowBits = -opt.windowBits;\n if (opt.windowBits === 0) { opt.windowBits = -15; }\n }\n\n // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate\n if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&\n !(options && options.windowBits)) {\n opt.windowBits += 32;\n }\n\n // Gzip header has no info about windows size, we can do autodetect only\n // for deflate. So, if window size not set, force it to max when gzip possible\n if ((opt.windowBits > 15) && (opt.windowBits < 48)) {\n // bit 3 (16) -> gzipped data\n // bit 4 (32) -> autodetect gzip/deflate\n if ((opt.windowBits & 15) === 0) {\n opt.windowBits |= 15;\n }\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_inflate.inflateInit2(\n this.strm,\n opt.windowBits\n );\n\n if (status !== c.Z_OK) {\n throw new Error(msg[status]);\n }\n\n this.header = new GZheader();\n\n zlib_inflate.inflateGetHeader(this.strm, this.header);\n\n // Setup dictionary\n if (opt.dictionary) {\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n opt.dictionary = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n opt.dictionary = new Uint8Array(opt.dictionary);\n }\n if (opt.raw) { //In raw mode we need to set the dictionary early\n status = zlib_inflate.inflateSetDictionary(this.strm, opt.dictionary);\n if (status !== c.Z_OK) {\n throw new Error(msg[status]);\n }\n }\n }\n}\n\n/**\n * Inflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.\n *\n * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with\n * new output chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Inflate#onEnd]]. 
For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the decompression context.\n *\n * On fail call [[Inflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nInflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var dictionary = this.options.dictionary;\n var status, _mode;\n var next_out_utf8, tail, utf8str;\n\n // Flag to properly process Z_BUF_ERROR on testing inflate call\n // when we check that all output data was flushed.\n var allowBufError = false;\n\n if (this.ended) { return false; }\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // Only binary strings can be decompressed on practice\n strm.input = strings.binstring2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n\n status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */\n\n if (status === c.Z_NEED_DICT && dictionary) {\n status = zlib_inflate.inflateSetDictionary(this.strm, dictionary);\n }\n\n if (status === c.Z_BUF_ERROR && allowBufError === true) {\n status = c.Z_OK;\n allowBufError = false;\n }\n\n if (status !== c.Z_STREAM_END && status !== c.Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n\n if (strm.next_out) {\n if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {\n\n if (this.options.to === 'string') {\n\n next_out_utf8 = strings.utf8border(strm.output, strm.next_out);\n\n tail = strm.next_out - next_out_utf8;\n utf8str = strings.buf2string(strm.output, next_out_utf8);\n\n // move tail\n strm.next_out = tail;\n strm.avail_out = chunkSize - tail;\n if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }\n\n this.onData(utf8str);\n\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n }\n\n // When no more input data, we should check that internal inflate buffers\n // are flushed. The only way to do it when avail_out = 0 - run one more\n // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.\n // Here we set flag to process this error properly.\n //\n // NOTE. 
Deflate does not return error in this case and does not needs such\n // logic.\n if (strm.avail_in === 0 && strm.avail_out === 0) {\n allowBufError = true;\n }\n\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);\n\n if (status === c.Z_STREAM_END) {\n _mode = c.Z_FINISH;\n }\n\n // Finalize on the last chunk.\n if (_mode === c.Z_FINISH) {\n status = zlib_inflate.inflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === c.Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === c.Z_SYNC_FLUSH) {\n this.onEnd(c.Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Inflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): output data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nInflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Inflate#onEnd(status) -> Void\n * - status (Number): inflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called either after you tell inflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nInflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === c.Z_OK) {\n if (this.options.to === 'string') {\n // Glue & convert here, until we teach pako to send\n // utf8 aligned strings to onData\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * inflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Decompress `data` with inflate/ungzip and `options`. Autodetect\n * format via wrapper header by default. That's why we don't provide\n * separate `ungzip` method.\n *\n * Supported options are:\n *\n * - windowBits\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , input = pako.deflate([1,2,3,4,5,6,7,8,9])\n * , output;\n *\n * try {\n * output = pako.inflate(input);\n * } catch (err)\n * console.log(err);\n * }\n * ```\n **/\nfunction inflate(input, options) {\n var inflator = new Inflate(options);\n\n inflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (inflator.err) { throw inflator.msg || msg[inflator.err]; }\n\n return inflator.result;\n}\n\n\n/**\n * inflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * The same as [[inflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction inflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return inflate(input, options);\n}\n\n\n/**\n * ungzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Just shortcut to [[inflate]], because it autodetects format\n * by header.content. Done for convenience.\n **/\n\n\nexports.Inflate = Inflate;\nexports.inflate = inflate;\nexports.inflateRaw = inflateRaw;\nexports.ungzip = inflate;\n","// Top level file is just a mixin of submodules & constants\n'use strict';\n\nvar assign = require('./lib/utils/common').assign;\n\nvar deflate = require('./lib/deflate');\nvar inflate = require('./lib/inflate');\nvar constants = require('./lib/zlib/constants');\n\nvar pako = {};\n\nassign(pako, deflate, inflate, constants);\n\nmodule.exports = pako;\n","/*\n * The `chars`, `lookup`, and `decodeFromBase64` members of this file are\n * licensed under the following:\n *\n * base64-arraybuffer\n * https://github.com/niklasvh/base64-arraybuffer\n *\n * Copyright (c) 2012 Niklas von Hertzen\n * Licensed under the MIT license.\n *\n */\nimport pako from 'pako';\nvar chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';\n// Use a lookup table to find the index.\nvar lookup = new Uint8Array(256);\nfor (var i = 0; i < chars.length; i++) {\n lookup[chars.charCodeAt(i)] = i;\n}\nexport var decodeFromBase64 = function (base64) {\n var bufferLength = base64.length * 0.75;\n var len = base64.length;\n var i;\n var p = 0;\n var encoded1;\n var encoded2;\n var encoded3;\n var encoded4;\n if (base64[base64.length - 1] === '=') {\n bufferLength--;\n if (base64[base64.length - 2] === '=') {\n bufferLength--;\n }\n }\n var bytes = new Uint8Array(bufferLength);\n for (i = 0; i < len; i += 4) {\n encoded1 = lookup[base64.charCodeAt(i)];\n encoded2 = lookup[base64.charCodeAt(i + 1)];\n encoded3 = lookup[base64.charCodeAt(i + 2)];\n encoded4 = lookup[base64.charCodeAt(i + 3)];\n bytes[p++] = (encoded1 << 2) | (encoded2 >> 4);\n bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2);\n bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63);\n }\n return bytes;\n};\nvar arrayToString = function (array) {\n var str = '';\n for (var i = 0; i < array.length; i++) {\n str += String.fromCharCode(array[i]);\n }\n return str;\n};\nexport var decompressJson = function (compressedJson) {\n return arrayToString(pako.inflate(decodeFromBase64(compressedJson)));\n};\nexport var padStart = function (value, length, padChar) {\n var padding = '';\n for (var idx = 0, len = length - value.length; 
idx < len; idx++) {\n padding += padChar;\n }\n return padding + value;\n};\n","import { decompressJson } from './utils';\nimport CourierBoldCompressed from './Courier-Bold.compressed.json';\nimport CourierBoldObliqueCompressed from './Courier-BoldOblique.compressed.json';\nimport CourierObliqueCompressed from './Courier-Oblique.compressed.json';\nimport CourierCompressed from './Courier.compressed.json';\nimport HelveticaBoldCompressed from './Helvetica-Bold.compressed.json';\nimport HelveticaBoldObliqueCompressed from './Helvetica-BoldOblique.compressed.json';\nimport HelveticaObliqueCompressed from './Helvetica-Oblique.compressed.json';\nimport HelveticaCompressed from './Helvetica.compressed.json';\nimport TimesBoldCompressed from './Times-Bold.compressed.json';\nimport TimesBoldItalicCompressed from './Times-BoldItalic.compressed.json';\nimport TimesItalicCompressed from './Times-Italic.compressed.json';\nimport TimesRomanCompressed from './Times-Roman.compressed.json';\nimport SymbolCompressed from './Symbol.compressed.json';\nimport ZapfDingbatsCompressed from './ZapfDingbats.compressed.json';\n// prettier-ignore\nvar compressedJsonForFontName = {\n 'Courier': CourierCompressed,\n 'Courier-Bold': CourierBoldCompressed,\n 'Courier-Oblique': CourierObliqueCompressed,\n 'Courier-BoldOblique': CourierBoldObliqueCompressed,\n 'Helvetica': HelveticaCompressed,\n 'Helvetica-Bold': HelveticaBoldCompressed,\n 'Helvetica-Oblique': HelveticaObliqueCompressed,\n 'Helvetica-BoldOblique': HelveticaBoldObliqueCompressed,\n 'Times-Roman': TimesRomanCompressed,\n 'Times-Bold': TimesBoldCompressed,\n 'Times-Italic': TimesItalicCompressed,\n 'Times-BoldItalic': TimesBoldItalicCompressed,\n 'Symbol': SymbolCompressed,\n 'ZapfDingbats': ZapfDingbatsCompressed,\n};\nexport var FontNames;\n(function (FontNames) {\n FontNames[\"Courier\"] = \"Courier\";\n FontNames[\"CourierBold\"] = \"Courier-Bold\";\n FontNames[\"CourierOblique\"] = \"Courier-Oblique\";\n FontNames[\"CourierBoldOblique\"] = \"Courier-BoldOblique\";\n FontNames[\"Helvetica\"] = \"Helvetica\";\n FontNames[\"HelveticaBold\"] = \"Helvetica-Bold\";\n FontNames[\"HelveticaOblique\"] = \"Helvetica-Oblique\";\n FontNames[\"HelveticaBoldOblique\"] = \"Helvetica-BoldOblique\";\n FontNames[\"TimesRoman\"] = \"Times-Roman\";\n FontNames[\"TimesRomanBold\"] = \"Times-Bold\";\n FontNames[\"TimesRomanItalic\"] = \"Times-Italic\";\n FontNames[\"TimesRomanBoldItalic\"] = \"Times-BoldItalic\";\n FontNames[\"Symbol\"] = \"Symbol\";\n FontNames[\"ZapfDingbats\"] = \"ZapfDingbats\";\n})(FontNames || (FontNames = {}));\nvar fontCache = {};\nvar Font = /** @class */ (function () {\n function Font() {\n var _this = this;\n this.getWidthOfGlyph = function (glyphName) {\n return _this.CharWidths[glyphName];\n };\n this.getXAxisKerningForPair = function (leftGlyphName, rightGlyphName) {\n return (_this.KernPairXAmounts[leftGlyphName] || {})[rightGlyphName];\n };\n }\n Font.load = function (fontName) {\n var cachedFont = fontCache[fontName];\n if (cachedFont)\n return cachedFont;\n var json = decompressJson(compressedJsonForFontName[fontName]);\n var font = Object.assign(new Font(), JSON.parse(json));\n font.CharWidths = font.CharMetrics.reduce(function (acc, metric) {\n acc[metric.N] = metric.WX;\n return acc;\n }, {});\n font.KernPairXAmounts = font.KernPairs.reduce(function (acc, _a) {\n var name1 = _a[0], name2 = _a[1], width = _a[2];\n if (!acc[name1])\n acc[name1] = {};\n acc[name1][name2] = width;\n return acc;\n }, {});\n fontCache[fontName] = font;\n 
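// Editor's note (not in the upstream source): a typical use of this class, sketched here for reference only:\n //   var helvetica = Font.load(FontNames.Helvetica);\n //   var widthOfA = helvetica.getWidthOfGlyph('A'); // looked up in the CharWidths map built above\n //   var kernAV = helvetica.getXAxisKerningForPair('A', 'V'); // undefined when no kern pair exists\n 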
return font;\n };\n return Font;\n}());\nexport { Font };\n","import { toCharCode } from \"./strings\";\n// Mapping from PDFDocEncoding to Unicode code point\nvar pdfDocEncodingToUnicode = new Uint16Array(256);\n// Initialize the code points which are the same\nfor (var idx = 0; idx < 256; idx++) {\n pdfDocEncodingToUnicode[idx] = idx;\n}\n// Set differences (see \"Table D.2 – PDFDocEncoding Character Set\" of the PDF spec)\npdfDocEncodingToUnicode[0x16] = toCharCode('\\u0017'); // SYNCRONOUS IDLE\npdfDocEncodingToUnicode[0x18] = toCharCode('\\u02D8'); // BREVE\npdfDocEncodingToUnicode[0x19] = toCharCode('\\u02C7'); // CARON\npdfDocEncodingToUnicode[0x1a] = toCharCode('\\u02C6'); // MODIFIER LETTER CIRCUMFLEX ACCENT\npdfDocEncodingToUnicode[0x1b] = toCharCode('\\u02D9'); // DOT ABOVE\npdfDocEncodingToUnicode[0x1c] = toCharCode('\\u02DD'); // DOUBLE ACUTE ACCENT\npdfDocEncodingToUnicode[0x1d] = toCharCode('\\u02DB'); // OGONEK\npdfDocEncodingToUnicode[0x1e] = toCharCode('\\u02DA'); // RING ABOVE\npdfDocEncodingToUnicode[0x1f] = toCharCode('\\u02DC'); // SMALL TILDE\npdfDocEncodingToUnicode[0x7f] = toCharCode('\\uFFFD'); // REPLACEMENT CHARACTER (box with questionmark)\npdfDocEncodingToUnicode[0x80] = toCharCode('\\u2022'); // BULLET\npdfDocEncodingToUnicode[0x81] = toCharCode('\\u2020'); // DAGGER\npdfDocEncodingToUnicode[0x82] = toCharCode('\\u2021'); // DOUBLE DAGGER\npdfDocEncodingToUnicode[0x83] = toCharCode('\\u2026'); // HORIZONTAL ELLIPSIS\npdfDocEncodingToUnicode[0x84] = toCharCode('\\u2014'); // EM DASH\npdfDocEncodingToUnicode[0x85] = toCharCode('\\u2013'); // EN DASH\npdfDocEncodingToUnicode[0x86] = toCharCode('\\u0192'); // LATIN SMALL LETTER SCRIPT F\npdfDocEncodingToUnicode[0x87] = toCharCode('\\u2044'); // FRACTION SLASH (solidus)\npdfDocEncodingToUnicode[0x88] = toCharCode('\\u2039'); // SINGLE LEFT-POINTING ANGLE QUOTATION MARK\npdfDocEncodingToUnicode[0x89] = toCharCode('\\u203A'); // SINGLE RIGHT-POINTING ANGLE QUOTATION MARK\npdfDocEncodingToUnicode[0x8a] = toCharCode('\\u2212'); // MINUS SIGN\npdfDocEncodingToUnicode[0x8b] = toCharCode('\\u2030'); // PER MILLE SIGN\npdfDocEncodingToUnicode[0x8c] = toCharCode('\\u201E'); // DOUBLE LOW-9 QUOTATION MARK (quotedblbase)\npdfDocEncodingToUnicode[0x8d] = toCharCode('\\u201C'); // LEFT DOUBLE QUOTATION MARK (quotedblleft)\npdfDocEncodingToUnicode[0x8e] = toCharCode('\\u201D'); // RIGHT DOUBLE QUOTATION MARK (quotedblright)\npdfDocEncodingToUnicode[0x8f] = toCharCode('\\u2018'); // LEFT SINGLE QUOTATION MARK (quoteleft)\npdfDocEncodingToUnicode[0x90] = toCharCode('\\u2019'); // RIGHT SINGLE QUOTATION MARK (quoteright)\npdfDocEncodingToUnicode[0x91] = toCharCode('\\u201A'); // SINGLE LOW-9 QUOTATION MARK (quotesinglbase)\npdfDocEncodingToUnicode[0x92] = toCharCode('\\u2122'); // TRADE MARK SIGN\npdfDocEncodingToUnicode[0x93] = toCharCode('\\uFB01'); // LATIN SMALL LIGATURE FI\npdfDocEncodingToUnicode[0x94] = toCharCode('\\uFB02'); // LATIN SMALL LIGATURE FL\npdfDocEncodingToUnicode[0x95] = toCharCode('\\u0141'); // LATIN CAPITAL LETTER L WITH STROKE\npdfDocEncodingToUnicode[0x96] = toCharCode('\\u0152'); // LATIN CAPITAL LIGATURE OE\npdfDocEncodingToUnicode[0x97] = toCharCode('\\u0160'); // LATIN CAPITAL LETTER S WITH CARON\npdfDocEncodingToUnicode[0x98] = toCharCode('\\u0178'); // LATIN CAPITAL LETTER Y WITH DIAERESIS\npdfDocEncodingToUnicode[0x99] = toCharCode('\\u017D'); // LATIN CAPITAL LETTER Z WITH CARON\npdfDocEncodingToUnicode[0x9a] = toCharCode('\\u0131'); // LATIN SMALL LETTER DOTLESS I\npdfDocEncodingToUnicode[0x9b] = 
toCharCode('\\u0142'); // LATIN SMALL LETTER L WITH STROKE\npdfDocEncodingToUnicode[0x9c] = toCharCode('\\u0153'); // LATIN SMALL LIGATURE OE\npdfDocEncodingToUnicode[0x9d] = toCharCode('\\u0161'); // LATIN SMALL LETTER S WITH CARON\npdfDocEncodingToUnicode[0x9e] = toCharCode('\\u017E'); // LATIN SMALL LETTER Z WITH CARON\npdfDocEncodingToUnicode[0x9f] = toCharCode('\\uFFFD'); // REPLACEMENT CHARACTER (box with questionmark)\npdfDocEncodingToUnicode[0xa0] = toCharCode('\\u20AC'); // EURO SIGN\npdfDocEncodingToUnicode[0xad] = toCharCode('\\uFFFD'); // REPLACEMENT CHARACTER (box with questionmark)\n/**\n * Decode a byte array into a string using PDFDocEncoding.\n *\n * @param bytes a byte array (decimal representation) containing a string\n * encoded with PDFDocEncoding.\n */\nexport var pdfDocEncodingDecode = function (bytes) {\n var codePoints = new Array(bytes.length);\n for (var idx = 0, len = bytes.length; idx < len; idx++) {\n codePoints[idx] = pdfDocEncodingToUnicode[bytes[idx]];\n }\n return String.fromCodePoint.apply(String, codePoints);\n};\n//# sourceMappingURL=pdfDocEncoding.js.map","/* tslint:disable max-classes-per-file */\nimport { decompressJson, padStart } from './utils';\nimport AllEncodingsCompressed from './all-encodings.compressed.json';\nvar decompressedEncodings = decompressJson(AllEncodingsCompressed);\nvar allUnicodeMappings = JSON.parse(decompressedEncodings);\nvar Encoding = /** @class */ (function () {\n function Encoding(name, unicodeMappings) {\n var _this = this;\n this.canEncodeUnicodeCodePoint = function (codePoint) {\n return codePoint in _this.unicodeMappings;\n };\n this.encodeUnicodeCodePoint = function (codePoint) {\n var mapped = _this.unicodeMappings[codePoint];\n if (!mapped) {\n var str = String.fromCharCode(codePoint);\n var hexCode = \"0x\" + padStart(codePoint.toString(16), 4, '0');\n var msg = _this.name + \" cannot encode \\\"\" + str + \"\\\" (\" + hexCode + \")\";\n throw new Error(msg);\n }\n return { code: mapped[0], name: mapped[1] };\n };\n this.name = name;\n this.supportedCodePoints = Object.keys(unicodeMappings)\n .map(Number)\n .sort(function (a, b) { return a - b; });\n this.unicodeMappings = unicodeMappings;\n }\n return Encoding;\n}());\nexport var Encodings = {\n Symbol: new Encoding('Symbol', allUnicodeMappings.symbol),\n ZapfDingbats: new Encoding('ZapfDingbats', allUnicodeMappings.zapfdingbats),\n WinAnsi: new Encoding('WinAnsi', allUnicodeMappings.win1252),\n};\n","import { FontNames } from '@pdf-lib/standard-fonts';\nexport var values = function (obj) { return Object.keys(obj).map(function (k) { return obj[k]; }); };\nexport var StandardFontValues = values(FontNames);\nexport var isStandardFont = function (input) {\n return StandardFontValues.includes(input);\n};\nexport var rectanglesAreEqual = function (a, b) { return a.x === b.x && a.y === b.y && a.width === b.width && a.height === b.height; };\n//# sourceMappingURL=objects.js.map","/* tslint:disable:ban-types */\nimport { values as objectValues } from \"./objects\";\nexport var backtick = function (val) { return \"`\" + val + \"`\"; };\nexport var singleQuote = function (val) { return \"'\" + val + \"'\"; };\n// prettier-ignore\nvar formatValue = function (value) {\n var type = typeof value;\n if (type === 'string')\n return singleQuote(value);\n else if (type === 'undefined')\n return backtick(value);\n else\n return value;\n};\nexport var createValueErrorMsg = function (value, valueName, values) {\n var allowedValues = new Array(values.length);\n for (var idx = 0, len = 
values.length; idx < len; idx++) {\n var v = values[idx];\n allowedValues[idx] = formatValue(v);\n }\n var joinedValues = allowedValues.join(' or ');\n // prettier-ignore\n return backtick(valueName) + \" must be one of \" + joinedValues + \", but was actually \" + formatValue(value);\n};\nexport var assertIsOneOf = function (value, valueName, allowedValues) {\n if (!Array.isArray(allowedValues)) {\n allowedValues = objectValues(allowedValues);\n }\n for (var idx = 0, len = allowedValues.length; idx < len; idx++) {\n if (value === allowedValues[idx])\n return;\n }\n throw new TypeError(createValueErrorMsg(value, valueName, allowedValues));\n};\nexport var assertIsOneOfOrUndefined = function (value, valueName, allowedValues) {\n if (!Array.isArray(allowedValues)) {\n allowedValues = objectValues(allowedValues);\n }\n assertIsOneOf(value, valueName, allowedValues.concat(undefined));\n};\nexport var assertIsSubset = function (values, valueName, allowedValues) {\n if (!Array.isArray(allowedValues)) {\n allowedValues = objectValues(allowedValues);\n }\n for (var idx = 0, len = values.length; idx < len; idx++) {\n assertIsOneOf(values[idx], valueName, allowedValues);\n }\n};\nexport var getType = function (val) {\n if (val === null)\n return 'null';\n if (val === undefined)\n return 'undefined';\n if (typeof val === 'string')\n return 'string';\n if (isNaN(val))\n return 'NaN';\n if (typeof val === 'number')\n return 'number';\n if (typeof val === 'boolean')\n return 'boolean';\n if (typeof val === 'symbol')\n return 'symbol';\n if (typeof val === 'bigint')\n return 'bigint';\n if (val.constructor && val.constructor.name)\n return val.constructor.name;\n if (val.name)\n return val.name;\n if (val.constructor)\n return String(val.constructor);\n return String(val);\n};\nexport var isType = function (value, type) {\n if (type === 'null')\n return value === null;\n if (type === 'undefined')\n return value === undefined;\n if (type === 'string')\n return typeof value === 'string';\n if (type === 'number')\n return typeof value === 'number' && !isNaN(value);\n if (type === 'boolean')\n return typeof value === 'boolean';\n if (type === 'symbol')\n return typeof value === 'symbol';\n if (type === 'bigint')\n return typeof value === 'bigint';\n if (type === Date)\n return value instanceof Date;\n if (type === Array)\n return value instanceof Array;\n if (type === Uint8Array)\n return value instanceof Uint8Array;\n if (type === ArrayBuffer)\n return value instanceof ArrayBuffer;\n if (type === Function)\n return value instanceof Function;\n return value instanceof type[0];\n};\nexport var createTypeErrorMsg = function (value, valueName, types) {\n var allowedTypes = new Array(types.length);\n for (var idx = 0, len = types.length; idx < len; idx++) {\n var type = types[idx];\n if (type === 'null')\n allowedTypes[idx] = backtick('null');\n if (type === 'undefined')\n allowedTypes[idx] = backtick('undefined');\n if (type === 'string')\n allowedTypes[idx] = backtick('string');\n else if (type === 'number')\n allowedTypes[idx] = backtick('number');\n else if (type === 'boolean')\n allowedTypes[idx] = backtick('boolean');\n else if (type === 'symbol')\n allowedTypes[idx] = backtick('symbol');\n else if (type === 'bigint')\n allowedTypes[idx] = backtick('bigint');\n else if (type === Array)\n allowedTypes[idx] = backtick('Array');\n else if (type === Uint8Array)\n allowedTypes[idx] = backtick('Uint8Array');\n else if (type === ArrayBuffer)\n allowedTypes[idx] = backtick('ArrayBuffer');\n else\n 
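// Editor's note (not in the upstream source): at this point `type` is assumed to be a [Constructor, displayName]\n // tuple (cf. `isType`, whose final fallback is `value instanceof type[0]`), so `type[1]` is the\n // human-readable name shown in the error message.\n 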
allowedTypes[idx] = backtick(type[1]);\n }\n var joinedTypes = allowedTypes.join(' or ');\n // prettier-ignore\n return backtick(valueName) + \" must be of type \" + joinedTypes + \", but was actually of type \" + backtick(getType(value));\n};\nexport var assertIs = function (value, valueName, types) {\n for (var idx = 0, len = types.length; idx < len; idx++) {\n if (isType(value, types[idx]))\n return;\n }\n throw new TypeError(createTypeErrorMsg(value, valueName, types));\n};\nexport var assertOrUndefined = function (value, valueName, types) {\n assertIs(value, valueName, types.concat('undefined'));\n};\nexport var assertEachIs = function (values, valueName, types) {\n for (var idx = 0, len = values.length; idx < len; idx++) {\n assertIs(values[idx], valueName, types);\n }\n};\nexport var assertRange = function (value, valueName, min, max) {\n assertIs(value, valueName, ['number']);\n assertIs(min, 'min', ['number']);\n assertIs(max, 'max', ['number']);\n max = Math.max(min, max);\n if (value < min || value > max) {\n // prettier-ignore\n throw new Error(backtick(valueName) + \" must be at least \" + min + \" and at most \" + max + \", but was actually \" + value);\n }\n};\nexport var assertRangeOrUndefined = function (value, valueName, min, max) {\n assertIs(value, valueName, ['number', 'undefined']);\n if (typeof value === 'number')\n assertRange(value, valueName, min, max);\n};\nexport var assertMultiple = function (value, valueName, multiplier) {\n assertIs(value, valueName, ['number']);\n if (value % multiplier !== 0) {\n // prettier-ignore\n throw new Error(backtick(valueName) + \" must be a multiple of \" + multiplier + \", but was actually \" + value);\n }\n};\nexport var assertInteger = function (value, valueName) {\n if (!Number.isInteger(value)) {\n throw new Error(backtick(valueName) + \" must be an integer, but was actually \" + value);\n }\n};\nexport var assertPositive = function (value, valueName) {\n if (![1, 0].includes(Math.sign(value))) {\n // prettier-ignore\n throw new Error(backtick(valueName) + \" must be a positive number or 0, but was actually \" + value);\n }\n};\n//# sourceMappingURL=validators.js.map","var CharCodes;\n(function (CharCodes) {\n CharCodes[CharCodes[\"Null\"] = 0] = \"Null\";\n CharCodes[CharCodes[\"Backspace\"] = 8] = \"Backspace\";\n CharCodes[CharCodes[\"Tab\"] = 9] = \"Tab\";\n CharCodes[CharCodes[\"Newline\"] = 10] = \"Newline\";\n CharCodes[CharCodes[\"FormFeed\"] = 12] = \"FormFeed\";\n CharCodes[CharCodes[\"CarriageReturn\"] = 13] = \"CarriageReturn\";\n CharCodes[CharCodes[\"Space\"] = 32] = \"Space\";\n CharCodes[CharCodes[\"ExclamationPoint\"] = 33] = \"ExclamationPoint\";\n CharCodes[CharCodes[\"Hash\"] = 35] = \"Hash\";\n CharCodes[CharCodes[\"Percent\"] = 37] = \"Percent\";\n CharCodes[CharCodes[\"LeftParen\"] = 40] = \"LeftParen\";\n CharCodes[CharCodes[\"RightParen\"] = 41] = \"RightParen\";\n CharCodes[CharCodes[\"Plus\"] = 43] = \"Plus\";\n CharCodes[CharCodes[\"Minus\"] = 45] = \"Minus\";\n CharCodes[CharCodes[\"Dash\"] = 45] = \"Dash\";\n CharCodes[CharCodes[\"Period\"] = 46] = \"Period\";\n CharCodes[CharCodes[\"ForwardSlash\"] = 47] = \"ForwardSlash\";\n CharCodes[CharCodes[\"Zero\"] = 48] = \"Zero\";\n CharCodes[CharCodes[\"One\"] = 49] = \"One\";\n CharCodes[CharCodes[\"Two\"] = 50] = \"Two\";\n CharCodes[CharCodes[\"Three\"] = 51] = \"Three\";\n CharCodes[CharCodes[\"Four\"] = 52] = \"Four\";\n CharCodes[CharCodes[\"Five\"] = 53] = \"Five\";\n CharCodes[CharCodes[\"Six\"] = 54] = \"Six\";\n CharCodes[CharCodes[\"Seven\"] 
= 55] = \"Seven\";\n CharCodes[CharCodes[\"Eight\"] = 56] = \"Eight\";\n CharCodes[CharCodes[\"Nine\"] = 57] = \"Nine\";\n CharCodes[CharCodes[\"LessThan\"] = 60] = \"LessThan\";\n CharCodes[CharCodes[\"GreaterThan\"] = 62] = \"GreaterThan\";\n CharCodes[CharCodes[\"A\"] = 65] = \"A\";\n CharCodes[CharCodes[\"D\"] = 68] = \"D\";\n CharCodes[CharCodes[\"E\"] = 69] = \"E\";\n CharCodes[CharCodes[\"F\"] = 70] = \"F\";\n CharCodes[CharCodes[\"O\"] = 79] = \"O\";\n CharCodes[CharCodes[\"P\"] = 80] = \"P\";\n CharCodes[CharCodes[\"R\"] = 82] = \"R\";\n CharCodes[CharCodes[\"LeftSquareBracket\"] = 91] = \"LeftSquareBracket\";\n CharCodes[CharCodes[\"BackSlash\"] = 92] = \"BackSlash\";\n CharCodes[CharCodes[\"RightSquareBracket\"] = 93] = \"RightSquareBracket\";\n CharCodes[CharCodes[\"a\"] = 97] = \"a\";\n CharCodes[CharCodes[\"b\"] = 98] = \"b\";\n CharCodes[CharCodes[\"d\"] = 100] = \"d\";\n CharCodes[CharCodes[\"e\"] = 101] = \"e\";\n CharCodes[CharCodes[\"f\"] = 102] = \"f\";\n CharCodes[CharCodes[\"i\"] = 105] = \"i\";\n CharCodes[CharCodes[\"j\"] = 106] = \"j\";\n CharCodes[CharCodes[\"l\"] = 108] = \"l\";\n CharCodes[CharCodes[\"m\"] = 109] = \"m\";\n CharCodes[CharCodes[\"n\"] = 110] = \"n\";\n CharCodes[CharCodes[\"o\"] = 111] = \"o\";\n CharCodes[CharCodes[\"r\"] = 114] = \"r\";\n CharCodes[CharCodes[\"s\"] = 115] = \"s\";\n CharCodes[CharCodes[\"t\"] = 116] = \"t\";\n CharCodes[CharCodes[\"u\"] = 117] = \"u\";\n CharCodes[CharCodes[\"x\"] = 120] = \"x\";\n CharCodes[CharCodes[\"LeftCurly\"] = 123] = \"LeftCurly\";\n CharCodes[CharCodes[\"RightCurly\"] = 125] = \"RightCurly\";\n CharCodes[CharCodes[\"Tilde\"] = 126] = \"Tilde\";\n})(CharCodes || (CharCodes = {}));\nexport default CharCodes;\n//# sourceMappingURL=CharCodes.js.map","var Cache = /** @class */ (function () {\n function Cache(populate) {\n this.populate = populate;\n this.value = undefined;\n }\n Cache.prototype.getValue = function () {\n return this.value;\n };\n Cache.prototype.access = function () {\n if (!this.value)\n this.value = this.populate();\n return this.value;\n };\n Cache.prototype.invalidate = function () {\n this.value = undefined;\n };\n Cache.populatedBy = function (populate) { return new Cache(populate); };\n return Cache;\n}());\nexport default Cache;\n//# sourceMappingURL=Cache.js.map","import { __extends } from \"tslib\";\nimport { arrayAsString } from \"../utils\";\nvar MethodNotImplementedError = /** @class */ (function (_super) {\n __extends(MethodNotImplementedError, _super);\n function MethodNotImplementedError(className, methodName) {\n var _this = this;\n var msg = \"Method \" + className + \".\" + methodName + \"() not implemented\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return MethodNotImplementedError;\n}(Error));\nexport { MethodNotImplementedError };\nvar PrivateConstructorError = /** @class */ (function (_super) {\n __extends(PrivateConstructorError, _super);\n function PrivateConstructorError(className) {\n var _this = this;\n var msg = \"Cannot construct \" + className + \" - it has a private constructor\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return PrivateConstructorError;\n}(Error));\nexport { PrivateConstructorError };\nvar UnexpectedObjectTypeError = /** @class */ (function (_super) {\n __extends(UnexpectedObjectTypeError, _super);\n function UnexpectedObjectTypeError(expected, actual) {\n var _this = this;\n var name = function (t) { var _a, _b; return (_a = t === null || t === void 0 ? void 0 : t.name) !== null && _a !== void 0 ? 
_a : (_b = t === null || t === void 0 ? void 0 : t.constructor) === null || _b === void 0 ? void 0 : _b.name; };\n var expectedTypes = Array.isArray(expected)\n ? expected.map(name)\n : [name(expected)];\n var msg = \"Expected instance of \" + expectedTypes.join(' or ') + \", \" +\n (\"but got instance of \" + (actual ? name(actual) : actual));\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return UnexpectedObjectTypeError;\n}(Error));\nexport { UnexpectedObjectTypeError };\nvar UnsupportedEncodingError = /** @class */ (function (_super) {\n __extends(UnsupportedEncodingError, _super);\n function UnsupportedEncodingError(encoding) {\n var _this = this;\n var msg = encoding + \" stream encoding not supported\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return UnsupportedEncodingError;\n}(Error));\nexport { UnsupportedEncodingError };\nvar ReparseError = /** @class */ (function (_super) {\n __extends(ReparseError, _super);\n function ReparseError(className, methodName) {\n var _this = this;\n var msg = \"Cannot call \" + className + \".\" + methodName + \"() more than once\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return ReparseError;\n}(Error));\nexport { ReparseError };\nvar MissingCatalogError = /** @class */ (function (_super) {\n __extends(MissingCatalogError, _super);\n function MissingCatalogError(ref) {\n var _this = this;\n var msg = \"Missing catalog (ref=\" + ref + \")\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return MissingCatalogError;\n}(Error));\nexport { MissingCatalogError };\nvar MissingPageContentsEmbeddingError = /** @class */ (function (_super) {\n __extends(MissingPageContentsEmbeddingError, _super);\n function MissingPageContentsEmbeddingError() {\n var _this = this;\n var msg = \"Can't embed page with missing Contents\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return MissingPageContentsEmbeddingError;\n}(Error));\nexport { MissingPageContentsEmbeddingError };\nvar UnrecognizedStreamTypeError = /** @class */ (function (_super) {\n __extends(UnrecognizedStreamTypeError, _super);\n function UnrecognizedStreamTypeError(stream) {\n var _a, _b, _c;\n var _this = this;\n var streamType = (_c = (_b = (_a = stream === null || stream === void 0 ? void 0 : stream.contructor) === null || _a === void 0 ? void 0 : _a.name) !== null && _b !== void 0 ? _b : stream === null || stream === void 0 ? void 0 : stream.name) !== null && _c !== void 0 ? _c : stream;\n var msg = \"Unrecognized stream type: \" + streamType;\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return UnrecognizedStreamTypeError;\n}(Error));\nexport { UnrecognizedStreamTypeError };\nvar PageEmbeddingMismatchedContextError = /** @class */ (function (_super) {\n __extends(PageEmbeddingMismatchedContextError, _super);\n function PageEmbeddingMismatchedContextError() {\n var _this = this;\n var msg = \"Found mismatched contexts while embedding pages. 
All pages in the array passed to `PDFDocument.embedPages()` must be from the same document.\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return PageEmbeddingMismatchedContextError;\n}(Error));\nexport { PageEmbeddingMismatchedContextError };\nvar PDFArrayIsNotRectangleError = /** @class */ (function (_super) {\n __extends(PDFArrayIsNotRectangleError, _super);\n function PDFArrayIsNotRectangleError(size) {\n var _this = this;\n var msg = \"Attempted to convert PDFArray with \" + size + \" elements to rectangle, but must have exactly 4 elements.\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return PDFArrayIsNotRectangleError;\n}(Error));\nexport { PDFArrayIsNotRectangleError };\nvar InvalidPDFDateStringError = /** @class */ (function (_super) {\n __extends(InvalidPDFDateStringError, _super);\n function InvalidPDFDateStringError(value) {\n var _this = this;\n var msg = \"Attempted to convert \\\"\" + value + \"\\\" to a date, but it does not match the PDF date string format.\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return InvalidPDFDateStringError;\n}(Error));\nexport { InvalidPDFDateStringError };\nvar InvalidTargetIndexError = /** @class */ (function (_super) {\n __extends(InvalidTargetIndexError, _super);\n function InvalidTargetIndexError(targetIndex, Count) {\n var _this = this;\n var msg = \"Invalid targetIndex specified: targetIndex=\" + targetIndex + \" must be less than Count=\" + Count;\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return InvalidTargetIndexError;\n}(Error));\nexport { InvalidTargetIndexError };\nvar CorruptPageTreeError = /** @class */ (function (_super) {\n __extends(CorruptPageTreeError, _super);\n function CorruptPageTreeError(targetIndex, operation) {\n var _this = this;\n var msg = \"Failed to \" + operation + \" at targetIndex=\" + targetIndex + \" due to corrupt page tree: It is likely that one or more 'Count' entries are invalid\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return CorruptPageTreeError;\n}(Error));\nexport { CorruptPageTreeError };\nvar IndexOutOfBoundsError = /** @class */ (function (_super) {\n __extends(IndexOutOfBoundsError, _super);\n function IndexOutOfBoundsError(index, min, max) {\n var _this = this;\n var msg = \"index should be at least \" + min + \" and at most \" + max + \", but was actually \" + index;\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return IndexOutOfBoundsError;\n}(Error));\nexport { IndexOutOfBoundsError };\nvar InvalidAcroFieldValueError = /** @class */ (function (_super) {\n __extends(InvalidAcroFieldValueError, _super);\n function InvalidAcroFieldValueError() {\n var _this = this;\n var msg = \"Attempted to set invalid field value\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return InvalidAcroFieldValueError;\n}(Error));\nexport { InvalidAcroFieldValueError };\nvar MultiSelectValueError = /** @class */ (function (_super) {\n __extends(MultiSelectValueError, _super);\n function MultiSelectValueError() {\n var _this = this;\n var msg = \"Attempted to select multiple values for single-select field\";\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return MultiSelectValueError;\n}(Error));\nexport { MultiSelectValueError };\nvar MissingDAEntryError = /** @class */ (function (_super) {\n __extends(MissingDAEntryError, _super);\n function MissingDAEntryError(fieldName) {\n var _this = this;\n var msg = \"No /DA (default appearance) entry found for field: 
\" + fieldName;\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return MissingDAEntryError;\n}(Error));\nexport { MissingDAEntryError };\nvar MissingTfOperatorError = /** @class */ (function (_super) {\n __extends(MissingTfOperatorError, _super);\n function MissingTfOperatorError(fieldName) {\n var _this = this;\n var msg = \"No Tf operator found for DA of field: \" + fieldName;\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return MissingTfOperatorError;\n}(Error));\nexport { MissingTfOperatorError };\nvar NumberParsingError = /** @class */ (function (_super) {\n __extends(NumberParsingError, _super);\n function NumberParsingError(pos, value) {\n var _this = this;\n var msg = \"Failed to parse number \" +\n (\"(line:\" + pos.line + \" col:\" + pos.column + \" offset=\" + pos.offset + \"): \\\"\" + value + \"\\\"\");\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return NumberParsingError;\n}(Error));\nexport { NumberParsingError };\nvar PDFParsingError = /** @class */ (function (_super) {\n __extends(PDFParsingError, _super);\n function PDFParsingError(pos, details) {\n var _this = this;\n var msg = \"Failed to parse PDF document \" +\n (\"(line:\" + pos.line + \" col:\" + pos.column + \" offset=\" + pos.offset + \"): \" + details);\n _this = _super.call(this, msg) || this;\n return _this;\n }\n return PDFParsingError;\n}(Error));\nexport { PDFParsingError };\nvar NextByteAssertionError = /** @class */ (function (_super) {\n __extends(NextByteAssertionError, _super);\n function NextByteAssertionError(pos, expectedByte, actualByte) {\n var _this = this;\n var msg = \"Expected next byte to be \" + expectedByte + \" but it was actually \" + actualByte;\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return NextByteAssertionError;\n}(PDFParsingError));\nexport { NextByteAssertionError };\nvar PDFObjectParsingError = /** @class */ (function (_super) {\n __extends(PDFObjectParsingError, _super);\n function PDFObjectParsingError(pos, byte) {\n var _this = this;\n var msg = \"Failed to parse PDF object starting with the following byte: \" + byte;\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return PDFObjectParsingError;\n}(PDFParsingError));\nexport { PDFObjectParsingError };\nvar PDFInvalidObjectParsingError = /** @class */ (function (_super) {\n __extends(PDFInvalidObjectParsingError, _super);\n function PDFInvalidObjectParsingError(pos) {\n var _this = this;\n var msg = \"Failed to parse invalid PDF object\";\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return PDFInvalidObjectParsingError;\n}(PDFParsingError));\nexport { PDFInvalidObjectParsingError };\nvar PDFStreamParsingError = /** @class */ (function (_super) {\n __extends(PDFStreamParsingError, _super);\n function PDFStreamParsingError(pos) {\n var _this = this;\n var msg = \"Failed to parse PDF stream\";\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return PDFStreamParsingError;\n}(PDFParsingError));\nexport { PDFStreamParsingError };\nvar UnbalancedParenthesisError = /** @class */ (function (_super) {\n __extends(UnbalancedParenthesisError, _super);\n function UnbalancedParenthesisError(pos) {\n var _this = this;\n var msg = \"Failed to parse PDF literal string due to unbalanced parenthesis\";\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return UnbalancedParenthesisError;\n}(PDFParsingError));\nexport { UnbalancedParenthesisError };\nvar StalledParserError = /** @class */ 
(function (_super) {\n __extends(StalledParserError, _super);\n function StalledParserError(pos) {\n var _this = this;\n var msg = \"Parser stalled\";\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return StalledParserError;\n}(PDFParsingError));\nexport { StalledParserError };\nvar MissingPDFHeaderError = /** @class */ (function (_super) {\n __extends(MissingPDFHeaderError, _super);\n function MissingPDFHeaderError(pos) {\n var _this = this;\n var msg = \"No PDF header found\";\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return MissingPDFHeaderError;\n}(PDFParsingError));\nexport { MissingPDFHeaderError };\nvar MissingKeywordError = /** @class */ (function (_super) {\n __extends(MissingKeywordError, _super);\n function MissingKeywordError(pos, keyword) {\n var _this = this;\n var msg = \"Did not find expected keyword '\" + arrayAsString(keyword) + \"'\";\n _this = _super.call(this, pos, msg) || this;\n return _this;\n }\n return MissingKeywordError;\n}(PDFParsingError));\nexport { MissingKeywordError };\n//# sourceMappingURL=errors.js.map","'use strict';\n\n\nvar TYPED_OK = (typeof Uint8Array !== 'undefined') &&\n (typeof Uint16Array !== 'undefined') &&\n (typeof Int32Array !== 'undefined');\n\nfunction _has(obj, key) {\n return Object.prototype.hasOwnProperty.call(obj, key);\n}\n\nexports.assign = function (obj /*from1, from2, from3, ...*/) {\n var sources = Array.prototype.slice.call(arguments, 1);\n while (sources.length) {\n var source = sources.shift();\n if (!source) { continue; }\n\n if (typeof source !== 'object') {\n throw new TypeError(source + 'must be non-object');\n }\n\n for (var p in source) {\n if (_has(source, p)) {\n obj[p] = source[p];\n }\n }\n }\n\n return obj;\n};\n\n\n// reduce buffer size, avoiding mem copy\nexports.shrinkBuf = function (buf, size) {\n if (buf.length === size) { return buf; }\n if (buf.subarray) { return buf.subarray(0, size); }\n buf.length = size;\n return buf;\n};\n\n\nvar fnTyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n if (src.subarray && dest.subarray) {\n dest.set(src.subarray(src_offs, src_offs + len), dest_offs);\n return;\n }\n // Fallback to ordinary array\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n var i, l, len, pos, chunk, result;\n\n // calculate data length\n len = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n len += chunks[i].length;\n }\n\n // join chunks\n result = new Uint8Array(len);\n pos = 0;\n for (i = 0, l = chunks.length; i < l; i++) {\n chunk = chunks[i];\n result.set(chunk, pos);\n pos += chunk.length;\n }\n\n return result;\n }\n};\n\nvar fnUntyped = {\n arraySet: function (dest, src, src_offs, len, dest_offs) {\n for (var i = 0; i < len; i++) {\n dest[dest_offs + i] = src[src_offs + i];\n }\n },\n // Join array of chunks to single array.\n flattenChunks: function (chunks) {\n return [].concat.apply([], chunks);\n }\n};\n\n\n// Enable/Disable typed arrays use, for testing\n//\nexports.setTyped = function (on) {\n if (on) {\n exports.Buf8 = Uint8Array;\n exports.Buf16 = Uint16Array;\n exports.Buf32 = Int32Array;\n exports.assign(exports, fnTyped);\n } else {\n exports.Buf8 = Array;\n exports.Buf16 = Array;\n exports.Buf32 = Array;\n exports.assign(exports, fnUntyped);\n }\n};\n\nexports.setTyped(TYPED_OK);\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey 
Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n/* eslint-disable space-unary-ops */\n\nvar utils = require('../utils/common');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n//var Z_FILTERED = 1;\n//var Z_HUFFMAN_ONLY = 2;\n//var Z_RLE = 3;\nvar Z_FIXED = 4;\n//var Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\nvar Z_BINARY = 0;\nvar Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n/*============================================================================*/\n\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n// From zutil.h\n\nvar STORED_BLOCK = 0;\nvar STATIC_TREES = 1;\nvar DYN_TREES = 2;\n/* The three kinds of block type */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\n/* The minimum and maximum match lengths */\n\n// From deflate.h\n/* ===========================================================================\n * Internal compression state.\n */\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\n\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\n\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\n\nvar D_CODES = 30;\n/* number of distance codes */\n\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\n\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\n\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar Buf_size = 16;\n/* size of bit buffer in bi_buf */\n\n\n/* ===========================================================================\n * Constants\n */\n\nvar MAX_BL_BITS = 7;\n/* Bit length codes must not exceed MAX_BL_BITS bits */\n\nvar END_BLOCK = 256;\n/* end of block literal code */\n\nvar REP_3_6 = 16;\n/* repeat previous bit length 3-6 times (2 bits of repeat count) */\n\nvar REPZ_3_10 = 17;\n/* repeat a zero length 3-10 times (3 bits of repeat count) */\n\nvar REPZ_11_138 = 18;\n/* repeat a zero length 11-138 times (7 bits of repeat count) */\n\n/* eslint-disable comma-spacing,array-bracket-spacing */\nvar extra_lbits = /* extra bits for each length code */\n [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0];\n\nvar extra_dbits = /* extra bits for each distance code */\n [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13];\n\nvar extra_blbits = /* extra bits for each bit length code */\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7];\n\nvar bl_order =\n [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];\n/* eslint-enable 
comma-spacing,array-bracket-spacing */\n\n/* The lengths of the bit length codes are sent in order of decreasing\n * probability, to avoid transmitting the lengths for unused bit length codes.\n */\n\n/* ===========================================================================\n * Local data. These are initialized only once.\n */\n\n// We pre-fill arrays with 0 to avoid uninitialized gaps\n\nvar DIST_CODE_LEN = 512; /* see definition of array dist_code below */\n\n// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1\nvar static_ltree = new Array((L_CODES + 2) * 2);\nzero(static_ltree);\n/* The static literal tree. Since the bit lengths are imposed, there is no\n * need for the L_CODES extra codes used during heap construction. However\n * The codes 286 and 287 are needed to build a canonical tree (see _tr_init\n * below).\n */\n\nvar static_dtree = new Array(D_CODES * 2);\nzero(static_dtree);\n/* The static distance tree. (Actually a trivial tree since all codes use\n * 5 bits.)\n */\n\nvar _dist_code = new Array(DIST_CODE_LEN);\nzero(_dist_code);\n/* Distance codes. The first 256 values correspond to the distances\n * 3 .. 258, the last 256 values correspond to the top 8 bits of\n * the 15 bit distances.\n */\n\nvar _length_code = new Array(MAX_MATCH - MIN_MATCH + 1);\nzero(_length_code);\n/* length code for each normalized match length (0 == MIN_MATCH) */\n\nvar base_length = new Array(LENGTH_CODES);\nzero(base_length);\n/* First normalized length for each code (0 = MIN_MATCH) */\n\nvar base_dist = new Array(D_CODES);\nzero(base_dist);\n/* First normalized distance for each code (0 = distance of 1) */\n\n\nfunction StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {\n\n this.static_tree = static_tree; /* static tree or NULL */\n this.extra_bits = extra_bits; /* extra bits for each code or NULL */\n this.extra_base = extra_base; /* base index for extra_bits */\n this.elems = elems; /* max number of elements in the tree */\n this.max_length = max_length; /* max bit length for the codes */\n\n // show if `static_tree` has data or dummy - needed for monomorphic objects\n this.has_stree = static_tree && static_tree.length;\n}\n\n\nvar static_l_desc;\nvar static_d_desc;\nvar static_bl_desc;\n\n\nfunction TreeDesc(dyn_tree, stat_desc) {\n this.dyn_tree = dyn_tree; /* the dynamic tree */\n this.max_code = 0; /* largest code with non zero frequency */\n this.stat_desc = stat_desc; /* the corresponding static tree */\n}\n\n\n\nfunction d_code(dist) {\n return dist < 256 ? 
_dist_code[dist] : _dist_code[256 + (dist >>> 7)];\n}\n\n\n/* ===========================================================================\n * Output a short LSB first on the stream.\n * IN assertion: there is enough room in pendingBuf.\n */\nfunction put_short(s, w) {\n// put_byte(s, (uch)((w) & 0xff));\n// put_byte(s, (uch)((ush)(w) >> 8));\n s.pending_buf[s.pending++] = (w) & 0xff;\n s.pending_buf[s.pending++] = (w >>> 8) & 0xff;\n}\n\n\n/* ===========================================================================\n * Send a value on a given number of bits.\n * IN assertion: length <= 16 and value fits in length bits.\n */\nfunction send_bits(s, value, length) {\n if (s.bi_valid > (Buf_size - length)) {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n put_short(s, s.bi_buf);\n s.bi_buf = value >> (Buf_size - s.bi_valid);\n s.bi_valid += length - Buf_size;\n } else {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n s.bi_valid += length;\n }\n}\n\n\nfunction send_code(s, c, tree) {\n send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);\n}\n\n\n/* ===========================================================================\n * Reverse the first len bits of a code, using straightforward code (a faster\n * method would use a table)\n * IN assertion: 1 <= len <= 15\n */\nfunction bi_reverse(code, len) {\n var res = 0;\n do {\n res |= code & 1;\n code >>>= 1;\n res <<= 1;\n } while (--len > 0);\n return res >>> 1;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer, keeping at most 7 bits in it.\n */\nfunction bi_flush(s) {\n if (s.bi_valid === 16) {\n put_short(s, s.bi_buf);\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n } else if (s.bi_valid >= 8) {\n s.pending_buf[s.pending++] = s.bi_buf & 0xff;\n s.bi_buf >>= 8;\n s.bi_valid -= 8;\n }\n}\n\n\n/* ===========================================================================\n * Compute the optimal bit lengths for a tree and update the total bit length\n * for the current block.\n * IN assertion: the fields freq and dad are set, heap[heap_max] and\n * above are the tree nodes sorted by increasing frequency.\n * OUT assertions: the field len is set to the optimal bit length, the\n * array bl_count contains the frequencies for each bit length.\n * The length opt_len is updated; static_len is also updated if stree is\n * not null.\n */\nfunction gen_bitlen(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var max_code = desc.max_code;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var extra = desc.stat_desc.extra_bits;\n var base = desc.stat_desc.extra_base;\n var max_length = desc.stat_desc.max_length;\n var h; /* heap index */\n var n, m; /* iterate over the tree elements */\n var bits; /* bit length */\n var xbits; /* extra bits */\n var f; /* frequency */\n var overflow = 0; /* number of elements with bit length too large */\n\n for (bits = 0; bits <= MAX_BITS; bits++) {\n s.bl_count[bits] = 0;\n }\n\n /* In a first pass, compute the optimal bit lengths (which may\n * overflow in the case of the bit length tree).\n */\n tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */\n\n for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {\n n = s.heap[h];\n bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;\n if (bits > max_length) {\n bits = max_length;\n overflow++;\n }\n tree[n * 2 + 1]/*.Len*/ = bits;\n /* We overwrite tree[n].Dad which is no longer needed */\n\n if (n > 
max_code) { continue; } /* not a leaf node */\n\n s.bl_count[bits]++;\n xbits = 0;\n if (n >= base) {\n xbits = extra[n - base];\n }\n f = tree[n * 2]/*.Freq*/;\n s.opt_len += f * (bits + xbits);\n if (has_stree) {\n s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);\n }\n }\n if (overflow === 0) { return; }\n\n // Trace((stderr,\"\\nbit length overflow\\n\"));\n /* This happens for example on obj2 and pic of the Calgary corpus */\n\n /* Find the first bit length which could increase: */\n do {\n bits = max_length - 1;\n while (s.bl_count[bits] === 0) { bits--; }\n s.bl_count[bits]--; /* move one leaf down the tree */\n s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */\n s.bl_count[max_length]--;\n /* The brother of the overflow item also moves one step up,\n * but this does not affect bl_count[max_length]\n */\n overflow -= 2;\n } while (overflow > 0);\n\n /* Now recompute all bit lengths, scanning in increasing frequency.\n * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all\n * lengths instead of fixing only the wrong ones. This idea is taken\n * from 'ar' written by Haruhiko Okumura.)\n */\n for (bits = max_length; bits !== 0; bits--) {\n n = s.bl_count[bits];\n while (n !== 0) {\n m = s.heap[--h];\n if (m > max_code) { continue; }\n if (tree[m * 2 + 1]/*.Len*/ !== bits) {\n // Trace((stderr,\"code %d bits %d->%d\\n\", m, tree[m].Len, bits));\n s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;\n tree[m * 2 + 1]/*.Len*/ = bits;\n }\n n--;\n }\n }\n}\n\n\n/* ===========================================================================\n * Generate the codes for a given tree and bit counts (which need not be\n * optimal).\n * IN assertion: the array bl_count contains the bit length statistics for\n * the given tree and the field len is set for all tree elements.\n * OUT assertion: the field code is set for all tree elements of non\n * zero code length.\n */\nfunction gen_codes(tree, max_code, bl_count)\n// ct_data *tree; /* the tree to decorate */\n// int max_code; /* largest code with non zero frequency */\n// ushf *bl_count; /* number of codes at each bit length */\n{\n var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */\n var code = 0; /* running code value */\n var bits; /* bit index */\n var n; /* code index */\n\n /* The distribution counts are first used to generate the code values\n * without bit reversal.\n */\n for (bits = 1; bits <= MAX_BITS; bits++) {\n next_code[bits] = code = (code + bl_count[bits - 1]) << 1;\n }\n /* Check that the bit counts in bl_count are consistent. 
The last code\n * must be all ones.\n */\n //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,\n // \"inconsistent bit counts\");\n //Tracev((stderr,\"\\ngen_codes: max_code %d \", max_code));\n\n for (n = 0; n <= max_code; n++) {\n var len = tree[n * 2 + 1]/*.Len*/;\n if (len === 0) { continue; }\n /* Now reverse the bits */\n tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len);\n\n //Tracecv(tree != static_ltree, (stderr,\"\\nn %3d %c l %2d c %4x (%x) \",\n // n, (isgraph(n) ? n : ' '), len, tree[n*2]/*.Code*/, next_code[len]-1));\n }\n}\n\n\n/* ===========================================================================\n * Initialize the various 'constant' tables.\n */\nfunction tr_static_init() {\n var n; /* iterates over tree elements */\n var bits; /* bit counter */\n var length; /* length value */\n var code; /* code value */\n var dist; /* distance index */\n var bl_count = new Array(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n // do check in _tr_init()\n //if (static_init_done) return;\n\n /* For some embedded targets, global variables are not initialized: */\n/*#ifdef NO_INIT_GLOBAL_POINTERS\n static_l_desc.static_tree = static_ltree;\n static_l_desc.extra_bits = extra_lbits;\n static_d_desc.static_tree = static_dtree;\n static_d_desc.extra_bits = extra_dbits;\n static_bl_desc.extra_bits = extra_blbits;\n#endif*/\n\n /* Initialize the mapping length (0..255) -> length code (0..28) */\n length = 0;\n for (code = 0; code < LENGTH_CODES - 1; code++) {\n base_length[code] = length;\n for (n = 0; n < (1 << extra_lbits[code]); n++) {\n _length_code[length++] = code;\n }\n }\n //Assert (length == 256, \"tr_static_init: length != 256\");\n /* Note that the length 255 (match length 258) can be represented\n * in two different ways: code 284 + 5 bits or code 285, so we\n * overwrite length_code[255] to use the best encoding:\n */\n _length_code[length - 1] = code;\n\n /* Initialize the mapping dist (0..32K) -> dist code (0..29) */\n dist = 0;\n for (code = 0; code < 16; code++) {\n base_dist[code] = dist;\n for (n = 0; n < (1 << extra_dbits[code]); n++) {\n _dist_code[dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: dist != 256\");\n dist >>= 7; /* from now on, all distances are divided by 128 */\n for (; code < D_CODES; code++) {\n base_dist[code] = dist << 7;\n for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {\n _dist_code[256 + dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: 256+dist != 512\");\n\n /* Construct the codes of the static literal tree */\n for (bits = 0; bits <= MAX_BITS; bits++) {\n bl_count[bits] = 0;\n }\n\n n = 0;\n while (n <= 143) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n while (n <= 255) {\n static_ltree[n * 2 + 1]/*.Len*/ = 9;\n n++;\n bl_count[9]++;\n }\n while (n <= 279) {\n static_ltree[n * 2 + 1]/*.Len*/ = 7;\n n++;\n bl_count[7]++;\n }\n while (n <= 287) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n /* Codes 286 and 287 do not exist, but we must include them in the\n * tree construction to get a canonical Huffman tree (longest code\n * all ones)\n */\n gen_codes(static_ltree, L_CODES + 1, bl_count);\n\n /* The static distance tree is trivial: */\n for (n = 0; n < D_CODES; n++) {\n static_dtree[n * 2 + 1]/*.Len*/ = 5;\n static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);\n }\n\n // Now data ready and we can init static trees\n static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);\n static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);\n static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);\n\n //static_init_done = true;\n}\n\n\n/* ===========================================================================\n * Initialize a new block.\n */\nfunction init_block(s) {\n var n; /* iterates over tree elements */\n\n /* Initialize the trees. 
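// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the canonical code
// assignment performed by gen_codes(), shown on a plain array of bit lengths.
// The returned codes are the values before the final bi_reverse() step; all
// names here are hypothetical.
function demoCanonicalCodes(lens) { // lens[n] = bit length of symbol n (0 = unused)
  var MAX = 15;
  var bl_count = new Array(MAX + 1);
  var next_code = new Array(MAX + 1);
  var codes = new Array(lens.length);
  var bits, n, code = 0;

  for (bits = 0; bits <= MAX; bits++) { bl_count[bits] = 0; }
  for (n = 0; n < lens.length; n++) { if (lens[n]) { bl_count[lens[n]]++; } }

  // first code of each bit length, exactly as in gen_codes()
  for (bits = 1; bits <= MAX; bits++) {
    code = (code + bl_count[bits - 1]) << 1;
    next_code[bits] = code;
  }
  // hand out consecutive codes within each length
  for (n = 0; n < lens.length; n++) {
    codes[n] = lens[n] === 0 ? -1 : next_code[lens[n]]++;
  }
  return codes;
}
// Example: demoCanonicalCodes([2, 1, 3, 3]) -> [2, 0, 6, 7]
// (symbol 1 gets the single 1-bit code 0, the two 3-bit codes are 110 and 111).
// --------------------------------------------------------------------------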
*/\n for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }\n\n s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;\n s.opt_len = s.static_len = 0;\n s.last_lit = s.matches = 0;\n}\n\n\n/* ===========================================================================\n * Flush the bit buffer and align the output on a byte boundary\n */\nfunction bi_windup(s)\n{\n if (s.bi_valid > 8) {\n put_short(s, s.bi_buf);\n } else if (s.bi_valid > 0) {\n //put_byte(s, (Byte)s->bi_buf);\n s.pending_buf[s.pending++] = s.bi_buf;\n }\n s.bi_buf = 0;\n s.bi_valid = 0;\n}\n\n/* ===========================================================================\n * Copy a stored block, storing first the length and its\n * one's complement if requested.\n */\nfunction copy_block(s, buf, len, header)\n//DeflateState *s;\n//charf *buf; /* the input data */\n//unsigned len; /* its length */\n//int header; /* true if block header must be written */\n{\n bi_windup(s); /* align on byte boundary */\n\n if (header) {\n put_short(s, len);\n put_short(s, ~len);\n }\n// while (len--) {\n// put_byte(s, *buf++);\n// }\n utils.arraySet(s.pending_buf, s.window, buf, len, s.pending);\n s.pending += len;\n}\n\n/* ===========================================================================\n * Compares to subtrees, using the tree depth as tie breaker when\n * the subtrees have equal frequency. This minimizes the worst case length.\n */\nfunction smaller(tree, n, m, depth) {\n var _n2 = n * 2;\n var _m2 = m * 2;\n return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||\n (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));\n}\n\n/* ===========================================================================\n * Restore the heap property by moving down the tree starting at node k,\n * exchanging a node with the smallest of its two sons if necessary, stopping\n * when the heap property is re-established (each father smaller than its\n * two sons).\n */\nfunction pqdownheap(s, tree, k)\n// deflate_state *s;\n// ct_data *tree; /* the tree to restore */\n// int k; /* node to move down */\n{\n var v = s.heap[k];\n var j = k << 1; /* left son of k */\n while (j <= s.heap_len) {\n /* Set j to the smallest of the two sons: */\n if (j < s.heap_len &&\n smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {\n j++;\n }\n /* Exit if v is smaller than both sons */\n if (smaller(tree, v, s.heap[j], s.depth)) { break; }\n\n /* Exchange v with the smallest son */\n s.heap[k] = s.heap[j];\n k = j;\n\n /* And continue down the tree, setting j to the left son of k */\n j <<= 1;\n }\n s.heap[k] = v;\n}\n\n\n// inlined manually\n// var SMALLEST = 1;\n\n/* ===========================================================================\n * Send the block data compressed using the given Huffman trees\n */\nfunction compress_block(s, ltree, dtree)\n// deflate_state *s;\n// const ct_data *ltree; /* literal tree */\n// const ct_data *dtree; /* distance tree */\n{\n var dist; /* distance of matched string */\n var lc; /* match length or unmatched char (if dist == 0) */\n var lx = 0; /* running index in l_buf */\n var code; /* the code to send */\n var extra; /* number of extra bits to send */\n\n if (s.last_lit !== 0) {\n do {\n dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);\n lc = s.pending_buf[s.l_buf + lx];\n lx++;\n\n if (dist === 0) {\n send_code(s, lc, ltree); /* send a literal byte 
*/\n //Tracecv(isgraph(lc), (stderr,\" '%c' \", lc));\n } else {\n /* Here, lc is the match length - MIN_MATCH */\n code = _length_code[lc];\n send_code(s, code + LITERALS + 1, ltree); /* send the length code */\n extra = extra_lbits[code];\n if (extra !== 0) {\n lc -= base_length[code];\n send_bits(s, lc, extra); /* send the extra length bits */\n }\n dist--; /* dist is now the match distance - 1 */\n code = d_code(dist);\n //Assert (code < D_CODES, \"bad d_code\");\n\n send_code(s, code, dtree); /* send the distance code */\n extra = extra_dbits[code];\n if (extra !== 0) {\n dist -= base_dist[code];\n send_bits(s, dist, extra); /* send the extra distance bits */\n }\n } /* literal or match pair ? */\n\n /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */\n //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,\n // \"pendingBuf overflow\");\n\n } while (lx < s.last_lit);\n }\n\n send_code(s, END_BLOCK, ltree);\n}\n\n\n/* ===========================================================================\n * Construct one Huffman tree and assigns the code bit strings and lengths.\n * Update the total bit length for the current block.\n * IN assertion: the field freq is set for all tree elements.\n * OUT assertions: the fields len and code are set to the optimal bit length\n * and corresponding code. The length opt_len is updated; static_len is\n * also updated if stree is not null. The field max_code is set.\n */\nfunction build_tree(s, desc)\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n var tree = desc.dyn_tree;\n var stree = desc.stat_desc.static_tree;\n var has_stree = desc.stat_desc.has_stree;\n var elems = desc.stat_desc.elems;\n var n, m; /* iterate over heap elements */\n var max_code = -1; /* largest code with non zero frequency */\n var node; /* new node being created */\n\n /* Construct the initial heap, with least frequent element in\n * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].\n * heap[0] is not used.\n */\n s.heap_len = 0;\n s.heap_max = HEAP_SIZE;\n\n for (n = 0; n < elems; n++) {\n if (tree[n * 2]/*.Freq*/ !== 0) {\n s.heap[++s.heap_len] = max_code = n;\n s.depth[n] = 0;\n\n } else {\n tree[n * 2 + 1]/*.Len*/ = 0;\n }\n }\n\n /* The pkzip format requires that at least one distance code exists,\n * and that at least one bit should be sent even if there is only one\n * possible code. So to avoid special checks later on we force at least\n * two codes of non zero frequency.\n */\n while (s.heap_len < 2) {\n node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);\n tree[node * 2]/*.Freq*/ = 1;\n s.depth[node] = 0;\n s.opt_len--;\n\n if (has_stree) {\n s.static_len -= stree[node * 2 + 1]/*.Len*/;\n }\n /* node is 0 or 1 so it does not have extra bits */\n }\n desc.max_code = max_code;\n\n /* The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree,\n * establish sub-heaps of increasing lengths:\n */\n for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }\n\n /* Construct the Huffman tree by repeatedly combining the least two\n * frequent nodes.\n */\n node = elems; /* next internal node of the tree */\n do {\n //pqremove(s, tree, n); /* n = node of least frequency */\n /*** pqremove ***/\n n = s.heap[1/*SMALLEST*/];\n s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];\n pqdownheap(s, tree, 1/*SMALLEST*/);\n /***/\n\n m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */\n\n s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */\n s.heap[--s.heap_max] = m;\n\n /* Create a new node father of n and m */\n tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;\n s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;\n tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;\n\n /* and insert the new node in the heap */\n s.heap[1/*SMALLEST*/] = node++;\n pqdownheap(s, tree, 1/*SMALLEST*/);\n\n } while (s.heap_len >= 2);\n\n s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];\n\n /* At this point, the fields freq and dad are set. We can now\n * generate the bit lengths.\n */\n gen_bitlen(s, desc);\n\n /* The field len is now set, we can generate the bit codes */\n gen_codes(tree, max_code, s.bl_count);\n}\n\n\n/* ===========================================================================\n * Scan a literal or distance tree to determine the frequencies of the codes\n * in the bit length tree.\n */\nfunction scan_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n s.bl_tree[curlen * 2]/*.Freq*/ += count;\n\n } else if (curlen !== 0) {\n\n if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }\n s.bl_tree[REP_3_6 * 2]/*.Freq*/++;\n\n } else if (count <= 10) {\n s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;\n\n } else {\n s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;\n }\n\n count = 0;\n prevlen = curlen;\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Send a literal or distance tree in compressed form, using the codes in\n * bl_tree.\n */\nfunction send_tree(s, tree, max_code)\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n var n; /* iterates over all tree elements */\n var prevlen = -1; /* last emitted length */\n var curlen; /* length of current code */\n\n var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n var count = 0; /* 
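// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the combine step
// that build_tree() performs with its frequency heap, shown naively with a
// sorted array. Unlike gen_bitlen(), this sketch does not cap code lengths at
// max_length and does not force a second code when only one symbol occurs.
// All names here are hypothetical.
function demoHuffmanLengths(freqs) {
  var nodes = [];
  var n;
  for (n = 0; n < freqs.length; n++) {
    if (freqs[n] !== 0) { nodes.push({ sym: n, freq: freqs[n], left: null, right: null }); }
  }
  // repeatedly merge the two least frequent nodes
  while (nodes.length > 1) {
    nodes.sort(function (a, b) { return a.freq - b.freq; });
    var lo = nodes.shift();
    var hi = nodes.shift();
    nodes.push({ sym: -1, freq: lo.freq + hi.freq, left: lo, right: hi });
  }
  // the depth of each leaf is its code length
  var lens = {};
  (function walk(node, depth) {
    if (!node) { return; }
    if (node.sym >= 0) { lens[node.sym] = depth; return; }
    walk(node.left, depth + 1);
    walk(node.right, depth + 1);
  })(nodes[0], 0);
  return lens;
}
// Example: demoHuffmanLengths([8, 1, 1, 2, 4]) -> { 0: 1, 1: 4, 2: 4, 3: 3, 4: 2 }
// --------------------------------------------------------------------------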
repeat count of the current code */\n var max_count = 7; /* max repeat count */\n var min_count = 4; /* min repeat count */\n\n /* tree[max_code+1].Len = -1; */ /* guard already set */\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);\n\n } else if (curlen !== 0) {\n if (curlen !== prevlen) {\n send_code(s, curlen, s.bl_tree);\n count--;\n }\n //Assert(count >= 3 && count <= 6, \" 3_6?\");\n send_code(s, REP_3_6, s.bl_tree);\n send_bits(s, count - 3, 2);\n\n } else if (count <= 10) {\n send_code(s, REPZ_3_10, s.bl_tree);\n send_bits(s, count - 3, 3);\n\n } else {\n send_code(s, REPZ_11_138, s.bl_tree);\n send_bits(s, count - 11, 7);\n }\n\n count = 0;\n prevlen = curlen;\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n}\n\n\n/* ===========================================================================\n * Construct the Huffman tree for the bit lengths and return the index in\n * bl_order of the last bit length code to send.\n */\nfunction build_bl_tree(s) {\n var max_blindex; /* index of last bit length code of non zero freq */\n\n /* Determine the bit length frequencies for literal and distance trees */\n scan_tree(s, s.dyn_ltree, s.l_desc.max_code);\n scan_tree(s, s.dyn_dtree, s.d_desc.max_code);\n\n /* Build the bit length tree: */\n build_tree(s, s.bl_desc);\n /* opt_len now includes the length of the tree representations, except\n * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.\n */\n\n /* Determine the number of bit length codes to send. The pkzip format\n * requires that at least 4 bit length codes be sent. 
(appnote.txt says\n * 3 but the actual value used is 4.)\n */\n for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {\n if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {\n break;\n }\n }\n /* Update opt_len to include the bit length tree and counts */\n s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;\n //Tracev((stderr, \"\\ndyn trees: dyn %ld, stat %ld\",\n // s->opt_len, s->static_len));\n\n return max_blindex;\n}\n\n\n/* ===========================================================================\n * Send the header for a block using dynamic Huffman trees: the counts, the\n * lengths of the bit length codes, the literal tree and the distance tree.\n * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.\n */\nfunction send_all_trees(s, lcodes, dcodes, blcodes)\n// deflate_state *s;\n// int lcodes, dcodes, blcodes; /* number of codes for each tree */\n{\n var rank; /* index in bl_order */\n\n //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, \"not enough codes\");\n //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,\n // \"too many codes\");\n //Tracev((stderr, \"\\nbl counts: \"));\n send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */\n send_bits(s, dcodes - 1, 5);\n send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */\n for (rank = 0; rank < blcodes; rank++) {\n //Tracev((stderr, \"\\nbl code %2d \", bl_order[rank]));\n send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);\n }\n //Tracev((stderr, \"\\nbl tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */\n //Tracev((stderr, \"\\nlit tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */\n //Tracev((stderr, \"\\ndist tree: sent %ld\", s->bits_sent));\n}\n\n\n/* ===========================================================================\n * Check if the data type is TEXT or BINARY, using the following algorithm:\n * - TEXT if the two conditions below are satisfied:\n * a) There are no non-portable control characters belonging to the\n * \"black list\" (0..6, 14..25, 28..31).\n * b) There is at least one printable character belonging to the\n * \"white list\" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).\n * - BINARY otherwise.\n * - The following partially-portable control characters form a\n * \"gray list\" that is ignored in this detection algorithm:\n * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).\n * IN assertion: the fields Freq of dyn_ltree are set.\n */\nfunction detect_data_type(s) {\n /* black_mask is the bit mask of black-listed bytes\n * set bits 0..6, 14..25, and 28..31\n * 0xf3ffc07f = binary 11110011111111111100000001111111\n */\n var black_mask = 0xf3ffc07f;\n var n;\n\n /* Check for non-textual (\"black-listed\") bytes. */\n for (n = 0; n <= 31; n++, black_mask >>>= 1) {\n if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {\n return Z_BINARY;\n }\n }\n\n /* Check for textual (\"white-listed\") bytes. 
*/\n if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||\n s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n for (n = 32; n < LITERALS; n++) {\n if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n }\n\n /* There are no \"black-listed\" or \"white-listed\" bytes:\n * this stream either is empty or has tolerated (\"gray-listed\") bytes only.\n */\n return Z_BINARY;\n}\n\n\nvar static_init_done = false;\n\n/* ===========================================================================\n * Initialize the tree data structures for a new zlib stream.\n */\nfunction _tr_init(s)\n{\n\n if (!static_init_done) {\n tr_static_init();\n static_init_done = true;\n }\n\n s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);\n s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);\n s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);\n\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n /* Initialize the first block of the first file: */\n init_block(s);\n}\n\n\n/* ===========================================================================\n * Send a stored block\n */\nfunction _tr_stored_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */\n copy_block(s, buf, stored_len, true); /* with header */\n}\n\n\n/* ===========================================================================\n * Send one empty static block to give enough lookahead for inflate.\n * This takes 10 bits, of which 7 may remain in the bit buffer.\n */\nfunction _tr_align(s) {\n send_bits(s, STATIC_TREES << 1, 3);\n send_code(s, END_BLOCK, static_ltree);\n bi_flush(s);\n}\n\n\n/* ===========================================================================\n * Determine the best encoding for the current block: dynamic trees, static\n * trees or store, and output the encoded block to the zip file.\n */\nfunction _tr_flush_block(s, buf, stored_len, last)\n//DeflateState *s;\n//charf *buf; /* input block, or NULL if too old */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n var opt_lenb, static_lenb; /* opt_len and static_len in bytes */\n var max_blindex = 0; /* index of last bit length code of non zero freq */\n\n /* Build the Huffman trees unless a stored block is forced */\n if (s.level > 0) {\n\n /* Check if the file is binary or text */\n if (s.strm.data_type === Z_UNKNOWN) {\n s.strm.data_type = detect_data_type(s);\n }\n\n /* Construct the literal and distance trees */\n build_tree(s, s.l_desc);\n // Tracev((stderr, \"\\nlit data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n\n build_tree(s, s.d_desc);\n // Tracev((stderr, \"\\ndist data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n /* At this point, opt_len and static_len are the total bit lengths of\n * the compressed block data, excluding the tree representations.\n */\n\n /* Build the bit length tree for the above two trees, and get the index\n * in bl_order of the last bit length code to send.\n */\n max_blindex = build_bl_tree(s);\n\n /* Determine the best encoding. Compute the block lengths in bytes. 
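// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the TEXT/BINARY
// heuristic of detect_data_type() above, applied to a plain 256-entry byte
// histogram instead of the dyn_ltree frequency array. Names are hypothetical.
function demoDetectDataType(hist) {
  var black_mask = 0xf3ffc07f; // bits 0..6, 14..25 and 28..31 set
  var n;
  // any black-listed control character => BINARY
  for (n = 0; n <= 31; n++, black_mask >>>= 1) {
    if ((black_mask & 1) && hist[n] !== 0) { return 'BINARY'; }
  }
  // any white-listed character (TAB, LF, CR, 32..255) => TEXT
  if (hist[9] !== 0 || hist[10] !== 0 || hist[13] !== 0) { return 'TEXT'; }
  for (n = 32; n < 256; n++) {
    if (hist[n] !== 0) { return 'TEXT'; }
  }
  // empty input, or gray-listed bytes only
  return 'BINARY';
}
// --------------------------------------------------------------------------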
*/\n opt_lenb = (s.opt_len + 3 + 7) >>> 3;\n static_lenb = (s.static_len + 3 + 7) >>> 3;\n\n // Tracev((stderr, \"\\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u \",\n // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,\n // s->last_lit));\n\n if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }\n\n } else {\n // Assert(buf != (char*)0, \"lost buf\");\n opt_lenb = static_lenb = stored_len + 5; /* force a stored block */\n }\n\n if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {\n /* 4: two words for the lengths */\n\n /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.\n * Otherwise we can't have processed more than WSIZE input bytes since\n * the last block flush, because compression would have been\n * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to\n * transform a block into a stored block.\n */\n _tr_stored_block(s, buf, stored_len, last);\n\n } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {\n\n send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);\n compress_block(s, static_ltree, static_dtree);\n\n } else {\n send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);\n send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);\n compress_block(s, s.dyn_ltree, s.dyn_dtree);\n }\n // Assert (s->compressed_len == s->bits_sent, \"bad compressed size\");\n /* The above check is made mod 2^32, for files larger than 512 MB\n * and uLong implemented on 32 bits.\n */\n init_block(s);\n\n if (last) {\n bi_windup(s);\n }\n // Tracev((stderr,\"\\ncomprlen %lu(%lu) \", s->compressed_len>>3,\n // s->compressed_len-7*last));\n}\n\n/* ===========================================================================\n * Save the match info and tally the frequency counts. Return true if\n * the current block must be flushed.\n */\nfunction _tr_tally(s, dist, lc)\n// deflate_state *s;\n// unsigned dist; /* distance of matched string */\n// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */\n{\n //var out_length, in_length, dcode;\n\n s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;\n s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;\n\n s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;\n s.last_lit++;\n\n if (dist === 0) {\n /* lc is the unmatched char */\n s.dyn_ltree[lc * 2]/*.Freq*/++;\n } else {\n s.matches++;\n /* Here, lc is the match length - MIN_MATCH */\n dist--; /* dist = match distance - 1 */\n //Assert((ush)dist < (ush)MAX_DIST(s) &&\n // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&\n // (ush)d_code(dist) < (ush)D_CODES, \"_tr_tally: bad match\");\n\n s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;\n s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;\n }\n\n// (!) 
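// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the choice
// _tr_flush_block() makes between a stored, static and dynamic block, using
// the same byte counts (the 3 block-type bits rounded up into bytes). The
// buf availability and level == 0 special cases are left out, and all names
// here are hypothetical.
function demoChooseBlockType(opt_len, static_len, stored_len, strategyIsFixed) {
  var opt_lenb = (opt_len + 3 + 7) >>> 3;       // dynamic trees, in bytes
  var static_lenb = (static_len + 3 + 7) >>> 3; // static trees, in bytes
  if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }

  if (stored_len + 4 <= opt_lenb) {             // 4: two words for the lengths
    return 'STORED';
  } else if (strategyIsFixed || static_lenb === opt_lenb) {
    return 'STATIC_TREES';
  }
  return 'DYN_TREES';
}
// --------------------------------------------------------------------------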
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n\n//#ifdef TRUNCATE_BLOCK\n// /* Try to guess if it is profitable to stop the current block here */\n// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {\n// /* Compute an upper bound for the compressed length */\n// out_length = s.last_lit*8;\n// in_length = s.strstart - s.block_start;\n//\n// for (dcode = 0; dcode < D_CODES; dcode++) {\n// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);\n// }\n// out_length >>>= 3;\n// //Tracev((stderr,\"\\nlast_lit %u, in %ld, out ~%ld(%ld%%) \",\n// // s->last_lit, in_length, out_length,\n// // 100L - out_length*100L/in_length));\n// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {\n// return true;\n// }\n// }\n//#endif\n\n return (s.last_lit === s.lit_bufsize - 1);\n /* We avoid equality with lit_bufsize because of wraparound at 64K\n * on 16 bit machines and because stored blocks are restricted to\n * 64K-1 bytes.\n */\n}\n\nexports._tr_init = _tr_init;\nexports._tr_stored_block = _tr_stored_block;\nexports._tr_flush_block = _tr_flush_block;\nexports._tr_tally = _tr_tally;\nexports._tr_align = _tr_align;\n","'use strict';\n\n// Note: adler32 takes 12% for level 0 and 2% for level 6.\n// It isn't worth it to make additional optimizations as in original.\n// Small size is preferable.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nfunction adler32(adler, buf, len, pos) {\n var s1 = (adler & 0xffff) |0,\n s2 = ((adler >>> 16) & 0xffff) |0,\n n = 0;\n\n while (len !== 0) {\n // Set limit ~ twice less than 5552, to keep\n // s2 in 31-bits, because we force signed ints.\n // in other case %= will fail.\n n = len > 2000 ? 2000 : len;\n len -= n;\n\n do {\n s1 = (s1 + buf[pos++]) |0;\n s2 = (s2 + s1) |0;\n } while (--n);\n\n s1 %= 65521;\n s2 %= 65521;\n }\n\n return (s1 | (s2 << 16)) |0;\n}\n\n\nmodule.exports = adler32;\n","'use strict';\n\n// Note: we can't get significant speed boost here.\n// So write code to minimize size - no pregenerated tables\n// and array tools dependencies.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. 
The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n// Use ordinary array, since untyped makes no boost here\nfunction makeTable() {\n var c, table = [];\n\n for (var n = 0; n < 256; n++) {\n c = n;\n for (var k = 0; k < 8; k++) {\n c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));\n }\n table[n] = c;\n }\n\n return table;\n}\n\n// Create table on load. Just 255 signed longs. Not a problem.\nvar crcTable = makeTable();\n\n\nfunction crc32(crc, buf, len, pos) {\n var t = crcTable,\n end = pos + len;\n\n crc ^= -1;\n\n for (var i = pos; i < end; i++) {\n crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];\n }\n\n return (crc ^ (-1)); // >>> 0;\n}\n\n\nmodule.exports = crc32;\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\nvar trees = require('./trees');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar msg = require('./messages');\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\nvar Z_NO_FLUSH = 0;\nvar Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\nvar Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\n//var Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. 
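// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source): both checksum
// helpers take (prev, buf, len, pos) and can be fed a stream chunk by chunk
// by passing the previous result back in. The expected values below are
// assumed reference results for the 5-byte input "hello".
var demoBytes = [0x68, 0x65, 0x6c, 0x6c, 0x6f];             // "hello"
var demoAdler = adler32(1, demoBytes, demoBytes.length, 0); // seed 1 -> 0x062c0215
var demoCrc = crc32(0, demoBytes, demoBytes.length, 0);     // seed 0 -> 0x3610a686 (as a signed 32-bit value)
// --------------------------------------------------------------------------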
Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\n//var Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\n//var Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n\n/* compression levels */\n//var Z_NO_COMPRESSION = 0;\n//var Z_BEST_SPEED = 1;\n//var Z_BEST_COMPRESSION = 9;\nvar Z_DEFAULT_COMPRESSION = -1;\n\n\nvar Z_FILTERED = 1;\nvar Z_HUFFMAN_ONLY = 2;\nvar Z_RLE = 3;\nvar Z_FIXED = 4;\nvar Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\n//var Z_BINARY = 0;\n//var Z_TEXT = 1;\n//var Z_ASCII = 1; // = Z_TEXT\nvar Z_UNKNOWN = 2;\n\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n/*============================================================================*/\n\n\nvar MAX_MEM_LEVEL = 9;\n/* Maximum value for memLevel in deflateInit2 */\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_MEM_LEVEL = 8;\n\n\nvar LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\nvar LITERALS = 256;\n/* number of literal bytes 0..255 */\nvar L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\nvar D_CODES = 30;\n/* number of distance codes */\nvar BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\nvar HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\nvar MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nvar MIN_MATCH = 3;\nvar MAX_MATCH = 258;\nvar MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);\n\nvar PRESET_DICT = 0x20;\n\nvar INIT_STATE = 42;\nvar EXTRA_STATE = 69;\nvar NAME_STATE = 73;\nvar COMMENT_STATE = 91;\nvar HCRC_STATE = 103;\nvar BUSY_STATE = 113;\nvar FINISH_STATE = 666;\n\nvar BS_NEED_MORE = 1; /* block not completed, need more input or more output */\nvar BS_BLOCK_DONE = 2; /* block flush performed */\nvar BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */\nvar BS_FINISH_DONE = 4; /* finish done, accept no more input or output */\n\nvar OS_CODE = 0x03; // Unix :) . Don't detect, use this default.\n\nfunction err(strm, errorCode) {\n strm.msg = msg[errorCode];\n return errorCode;\n}\n\nfunction rank(f) {\n return ((f) << 1) - ((f) > 4 ? 9 : 0);\n}\n\nfunction zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n\n/* =========================================================================\n * Flush as much pending output as possible. All deflate() output goes\n * through this function so some applications may wish to modify it\n * to avoid allocating a large strm->output buffer and copying into it.\n * (See also read_buf()).\n */\nfunction flush_pending(strm) {\n var s = strm.state;\n\n //_tr_flush_bits(s);\n var len = s.pending;\n if (len > strm.avail_out) {\n len = strm.avail_out;\n }\n if (len === 0) { return; }\n\n utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out);\n strm.next_out += len;\n s.pending_out += len;\n strm.total_out += len;\n strm.avail_out -= len;\n s.pending -= len;\n if (s.pending === 0) {\n s.pending_out = 0;\n }\n}\n\n\nfunction flush_block_only(s, last) {\n trees._tr_flush_block(s, (s.block_start >= 0 ? 
s.block_start : -1), s.strstart - s.block_start, last);\n s.block_start = s.strstart;\n flush_pending(s.strm);\n}\n\n\nfunction put_byte(s, b) {\n s.pending_buf[s.pending++] = b;\n}\n\n\n/* =========================================================================\n * Put a short in the pending buffer. The 16-bit value is put in MSB order.\n * IN assertion: the stream state is correct and there is enough room in\n * pending_buf.\n */\nfunction putShortMSB(s, b) {\n// put_byte(s, (Byte)(b >> 8));\n// put_byte(s, (Byte)(b & 0xff));\n s.pending_buf[s.pending++] = (b >>> 8) & 0xff;\n s.pending_buf[s.pending++] = b & 0xff;\n}\n\n\n/* ===========================================================================\n * Read a new buffer from the current input stream, update the adler32\n * and total number of bytes read. All deflate() input goes through\n * this function so some applications may wish to modify it to avoid\n * allocating a large strm->input buffer and copying from it.\n * (See also flush_pending()).\n */\nfunction read_buf(strm, buf, start, size) {\n var len = strm.avail_in;\n\n if (len > size) { len = size; }\n if (len === 0) { return 0; }\n\n strm.avail_in -= len;\n\n // zmemcpy(buf, strm->next_in, len);\n utils.arraySet(buf, strm.input, strm.next_in, len, start);\n if (strm.state.wrap === 1) {\n strm.adler = adler32(strm.adler, buf, len, start);\n }\n\n else if (strm.state.wrap === 2) {\n strm.adler = crc32(strm.adler, buf, len, start);\n }\n\n strm.next_in += len;\n strm.total_in += len;\n\n return len;\n}\n\n\n/* ===========================================================================\n * Set match_start to the longest match starting at the given string and\n * return its length. Matches shorter or equal to prev_length are discarded,\n * in which case the result is equal to prev_length and match_start is\n * garbage.\n * IN assertions: cur_match is the head of the hash chain for the current\n * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1\n * OUT assertion: the match length is not greater than s->lookahead.\n */\nfunction longest_match(s, cur_match) {\n var chain_length = s.max_chain_length; /* max hash chain length */\n var scan = s.strstart; /* current string */\n var match; /* matched string */\n var len; /* length of current match */\n var best_len = s.prev_length; /* best match length so far */\n var nice_match = s.nice_match; /* stop if match long enough */\n var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?\n s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;\n\n var _win = s.window; // shortcut\n\n var wmask = s.w_mask;\n var prev = s.prev;\n\n /* Stop when cur_match becomes <= limit. To simplify the code,\n * we prevent matches with the string of window index 0.\n */\n\n var strend = s.strstart + MAX_MATCH;\n var scan_end1 = _win[scan + best_len - 1];\n var scan_end = _win[scan + best_len];\n\n /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.\n * It is easy to get rid of this optimization if necessary.\n */\n // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, \"Code too clever\");\n\n /* Do not waste too much time if we already have a good match: */\n if (s.prev_length >= s.good_match) {\n chain_length >>= 2;\n }\n /* Do not look for matches beyond the end of the input. 
This is necessary\n * to make deflate deterministic.\n */\n if (nice_match > s.lookahead) { nice_match = s.lookahead; }\n\n // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, \"need lookahead\");\n\n do {\n // Assert(cur_match < s->strstart, \"no future\");\n match = cur_match;\n\n /* Skip to next match if the match length cannot increase\n * or if the match length is less than 2. Note that the checks below\n * for insufficient lookahead only occur occasionally for performance\n * reasons. Therefore uninitialized memory will be accessed, and\n * conditional jumps will be made that depend on those values.\n * However the length of the match is limited to the lookahead, so\n * the output of deflate is not affected by the uninitialized values.\n */\n\n if (_win[match + best_len] !== scan_end ||\n _win[match + best_len - 1] !== scan_end1 ||\n _win[match] !== _win[scan] ||\n _win[++match] !== _win[scan + 1]) {\n continue;\n }\n\n /* The check at best_len-1 can be removed because it will be made\n * again later. (This heuristic is not always a win.)\n * It is not necessary to compare scan[2] and match[2] since they\n * are always equal when the other bytes match, given that\n * the hash keys are equal and that HASH_BITS >= 8.\n */\n scan += 2;\n match++;\n // Assert(*scan == *match, \"match[2]?\");\n\n /* We check for insufficient lookahead only every 8th comparison;\n * the 256th check will be made at strstart+258.\n */\n do {\n /*jshint noempty:false*/\n } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n scan < strend);\n\n // Assert(scan <= s->window+(unsigned)(s->window_size-1), \"wild scan\");\n\n len = MAX_MATCH - (strend - scan);\n scan = strend - MAX_MATCH;\n\n if (len > best_len) {\n s.match_start = cur_match;\n best_len = len;\n if (len >= nice_match) {\n break;\n }\n scan_end1 = _win[scan + best_len - 1];\n scan_end = _win[scan + best_len];\n }\n } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);\n\n if (best_len <= s.lookahead) {\n return best_len;\n }\n return s.lookahead;\n}\n\n\n/* ===========================================================================\n * Fill the window when the lookahead becomes insufficient.\n * Updates strstart and lookahead.\n *\n * IN assertion: lookahead < MIN_LOOKAHEAD\n * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD\n * At least one byte has been read, or avail_in == 0; reads are\n * performed for at least two bytes (required for the zip translate_eol\n * option -- not supported here).\n */\nfunction fill_window(s) {\n var _w_size = s.w_size;\n var p, n, m, more, str;\n\n //Assert(s->lookahead < MIN_LOOKAHEAD, \"already enough lookahead\");\n\n do {\n more = s.window_size - s.lookahead - s.strstart;\n\n // JS ints have 32 bit, block below not needed\n /* Deal with !@#$% 64K limit: */\n //if (sizeof(int) <= 2) {\n // if (more == 0 && s->strstart == 0 && s->lookahead == 0) {\n // more = wsize;\n //\n // } else if (more == (unsigned)(-1)) {\n // /* Very unlikely, but possible on 16 bit machine if\n // * strstart == 0 && lookahead == 1 (input done a byte at time)\n // */\n // more--;\n // }\n //}\n\n\n /* If the window is almost full and there is insufficient lookahead,\n * move the upper half to the lower one to make room in the upper half.\n */\n if (s.strstart >= _w_size 
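// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the core comparison
// longest_match() performs for a single candidate position, without the hash
// chain walk, the scan_end short-circuit or the unrolled loop. Names are
// hypothetical.
function demoMatchLength(win, candidate, strstart, maxLen) {
  var len = 0;
  while (len < maxLen && win[candidate + len] === win[strstart + len]) {
    len++;
  }
  return len; // the real code keeps the best candidate and records match_start
}
// --------------------------------------------------------------------------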
+ (_w_size - MIN_LOOKAHEAD)) {\n\n utils.arraySet(s.window, s.window, _w_size, _w_size, 0);\n s.match_start -= _w_size;\n s.strstart -= _w_size;\n /* we now have strstart >= MAX_DIST */\n s.block_start -= _w_size;\n\n /* Slide the hash table (could be avoided with 32 bit values\n at the expense of memory usage). We slide even when level == 0\n to keep the hash table consistent if we switch back to level > 0\n later. (Using level 0 permanently is not an optimal usage of\n zlib, so we don't care about this pathological case.)\n */\n\n n = s.hash_size;\n p = n;\n do {\n m = s.head[--p];\n s.head[p] = (m >= _w_size ? m - _w_size : 0);\n } while (--n);\n\n n = _w_size;\n p = n;\n do {\n m = s.prev[--p];\n s.prev[p] = (m >= _w_size ? m - _w_size : 0);\n /* If n is not on any hash chain, prev[n] is garbage but\n * its value will never be used.\n */\n } while (--n);\n\n more += _w_size;\n }\n if (s.strm.avail_in === 0) {\n break;\n }\n\n /* If there was no sliding:\n * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&\n * more == window_size - lookahead - strstart\n * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)\n * => more >= window_size - 2*WSIZE + 2\n * In the BIG_MEM or MMAP case (not yet supported),\n * window_size == input_size + MIN_LOOKAHEAD &&\n * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.\n * Otherwise, window_size == 2*WSIZE so more >= 2.\n * If there was sliding, more >= WSIZE. So in all cases, more >= 2.\n */\n //Assert(more >= 2, \"more < 2\");\n n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);\n s.lookahead += n;\n\n /* Initialize the hash value now that we have some input: */\n if (s.lookahead + s.insert >= MIN_MATCH) {\n str = s.strstart - s.insert;\n s.ins_h = s.window[str];\n\n /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask;\n//#if MIN_MATCH != 3\n// Call update_hash() MIN_MATCH-3 more times\n//#endif\n while (s.insert) {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = str;\n str++;\n s.insert--;\n if (s.lookahead + s.insert < MIN_MATCH) {\n break;\n }\n }\n }\n /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,\n * but this is not important since only literal bytes will be emitted.\n */\n\n } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);\n\n /* If the WIN_INIT bytes after the end of the current data have never been\n * written, then zero those bytes in order to avoid memory check reports of\n * the use of uninitialized (or uninitialised as Julian writes) bytes by\n * the longest match routines. Update the high water mark for the next\n * time through here. 
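// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the rebasing that
// fill_window() applies to the head[] and prev[] tables when the upper half
// of the window is copied down by w_size positions. Names are hypothetical.
function demoSlideHash(table, w_size) {
  for (var p = table.length - 1; p >= 0; p--) {
    // positions that slid out of the window become 0 (NIL)
    table[p] = (table[p] >= w_size ? table[p] - w_size : 0);
  }
}
// --------------------------------------------------------------------------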
WIN_INIT is set to MAX_MATCH since the longest match\n * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.\n */\n// if (s.high_water < s.window_size) {\n// var curr = s.strstart + s.lookahead;\n// var init = 0;\n//\n// if (s.high_water < curr) {\n// /* Previous high water mark below current data -- zero WIN_INIT\n// * bytes or up to end of window, whichever is less.\n// */\n// init = s.window_size - curr;\n// if (init > WIN_INIT)\n// init = WIN_INIT;\n// zmemzero(s->window + curr, (unsigned)init);\n// s->high_water = curr + init;\n// }\n// else if (s->high_water < (ulg)curr + WIN_INIT) {\n// /* High water mark at or above current data, but below current data\n// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up\n// * to end of window, whichever is less.\n// */\n// init = (ulg)curr + WIN_INIT - s->high_water;\n// if (init > s->window_size - s->high_water)\n// init = s->window_size - s->high_water;\n// zmemzero(s->window + s->high_water, (unsigned)init);\n// s->high_water += init;\n// }\n// }\n//\n// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,\n// \"not enough room for search\");\n}\n\n/* ===========================================================================\n * Copy without compression as much as possible from the input stream, return\n * the current block state.\n * This function does not insert new strings in the dictionary since\n * uncompressible data is probably not useful. This function is used\n * only for the level=0 compression option.\n * NOTE: this function should be optimized to avoid extra copying from\n * window to pending_buf.\n */\nfunction deflate_stored(s, flush) {\n /* Stored blocks are limited to 0xffff bytes, pending_buf is limited\n * to pending_buf_size, and each stored block has a 5 byte header:\n */\n var max_block_size = 0xffff;\n\n if (max_block_size > s.pending_buf_size - 5) {\n max_block_size = s.pending_buf_size - 5;\n }\n\n /* Copy as much as possible from input to output: */\n for (;;) {\n /* Fill the window as much as possible: */\n if (s.lookahead <= 1) {\n\n //Assert(s->strstart < s->w_size+MAX_DIST(s) ||\n // s->block_start >= (long)s->w_size, \"slide too late\");\n// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||\n// s.block_start >= s.w_size)) {\n// throw new Error(\"slide too late\");\n// }\n\n fill_window(s);\n if (s.lookahead === 0 && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n\n if (s.lookahead === 0) {\n break;\n }\n /* flush the current block */\n }\n //Assert(s->block_start >= 0L, \"block gone\");\n// if (s.block_start < 0) throw new Error(\"block gone\");\n\n s.strstart += s.lookahead;\n s.lookahead = 0;\n\n /* Emit a stored block if pending_buf will be full: */\n var max_start = s.block_start + max_block_size;\n\n if (s.strstart === 0 || s.strstart >= max_start) {\n /* strstart == 0 is possible when wraparound on 16-bit machine */\n s.lookahead = s.strstart - max_start;\n s.strstart = max_start;\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n\n\n }\n /* Flush if we may have to slide, otherwise block_start may become\n * negative and the data will be gone:\n */\n if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n\n s.insert = 0;\n\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out 
=== 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n\n if (s.strstart > s.block_start) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_NEED_MORE;\n}\n\n/* ===========================================================================\n * Compress as much as possible from the input stream, return the current\n * block state.\n * This function does not perform lazy evaluation of matches and inserts\n * new strings in the dictionary only for unmatched strings or for short\n * matches. It is used only for the fast compression options.\n */\nfunction deflate_fast(s, flush) {\n var hash_head; /* head of the hash chain */\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) {\n break; /* flush the current block */\n }\n }\n\n /* Insert the string window[strstart .. strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n * At this point we have always match_length < MIN_MATCH\n */\n if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n }\n if (s.match_length >= MIN_MATCH) {\n // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only\n\n /*** _tr_tally_dist(s, s.strstart - s.match_start,\n s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n\n /* Insert new strings in the hash table only if the match length\n * is not too large. 
This saves time but degrades compression.\n */\n if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {\n s.match_length--; /* string at strstart already in table */\n do {\n s.strstart++;\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n /* strstart never exceeds WSIZE-MAX_MATCH, so there are\n * always MIN_MATCH bytes ahead.\n */\n } while (--s.match_length !== 0);\n s.strstart++;\n } else\n {\n s.strstart += s.match_length;\n s.match_length = 0;\n s.ins_h = s.window[s.strstart];\n /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;\n\n//#if MIN_MATCH != 3\n// Call UPDATE_HASH() MIN_MATCH-3 more times\n//#endif\n /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not\n * matter since it will be recomputed at next deflate call.\n */\n }\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s.window[s.strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * Same as above, but achieves better compression. We use a lazy\n * evaluation for matches: a match is finally adopted only if there is\n * no better match at the next window position.\n */\nfunction deflate_slow(s, flush) {\n var hash_head; /* head of hash chain */\n var bflush; /* set if current block must be flushed */\n\n var max_insert;\n\n /* Process the input block. */\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* Insert the string window[strstart .. 
strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n */\n s.prev_length = s.match_length;\n s.prev_match = s.match_start;\n s.match_length = MIN_MATCH - 1;\n\n if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&\n s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n\n if (s.match_length <= 5 &&\n (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {\n\n /* If prev_match is also MIN_MATCH, match_start is garbage\n * but we will ignore the current match anyway.\n */\n s.match_length = MIN_MATCH - 1;\n }\n }\n /* If there was a match at the previous step and the current\n * match is not better, output the previous match:\n */\n if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {\n max_insert = s.strstart + s.lookahead - MIN_MATCH;\n /* Do not insert strings in hash table beyond this. */\n\n //check_match(s, s.strstart-1, s.prev_match, s.prev_length);\n\n /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,\n s.prev_length - MIN_MATCH, bflush);***/\n bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);\n /* Insert in hash table all strings up to the end of the match.\n * strstart-1 and strstart are already inserted. If there is not\n * enough lookahead, the last two strings are not inserted in\n * the hash table.\n */\n s.lookahead -= s.prev_length - 1;\n s.prev_length -= 2;\n do {\n if (++s.strstart <= max_insert) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n } while (--s.prev_length !== 0);\n s.match_available = 0;\n s.match_length = MIN_MATCH - 1;\n s.strstart++;\n\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n } else if (s.match_available) {\n /* If there was no match at the previous position, output a\n * single literal. 
If there was a match but the current match\n * is longer, truncate the previous match to a single literal.\n */\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n if (bflush) {\n /*** FLUSH_BLOCK_ONLY(s, 0) ***/\n flush_block_only(s, false);\n /***/\n }\n s.strstart++;\n s.lookahead--;\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n } else {\n /* There is no previous match to compare with, wait for\n * the next step to decide.\n */\n s.match_available = 1;\n s.strstart++;\n s.lookahead--;\n }\n }\n //Assert (flush != Z_NO_FLUSH, \"no flush?\");\n if (s.match_available) {\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);\n\n s.match_available = 0;\n }\n s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_BLOCK_DONE;\n}\n\n\n/* ===========================================================================\n * For Z_RLE, simply look for runs of bytes, generate matches only of distance\n * one. Do not maintain a hash table. (It will be regenerated if this run of\n * deflate switches away from Z_RLE.)\n */\nfunction deflate_rle(s, flush) {\n var bflush; /* set if current block must be flushed */\n var prev; /* byte at distance one to match */\n var scan, strend; /* scan goes up to strend for length of run */\n\n var _win = s.window;\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. 
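\n *\n * (Aside, added for clarity: in this Z_RLE mode every match is emitted with a\n * fixed distance of 1, so a run such as "aaaaaaaa" compresses to the literal\n * 'a' followed by a single distance-1 match covering the remaining bytes --\n * see the trees._tr_tally(s, 1, ...) call below. This path is selected when\n * deflate is initialised with strategy Z_RLE rather than by the level.)\n * 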
We need MAX_MATCH bytes\n * for the longest run, plus one for the unrolled loop.\n */\n if (s.lookahead <= MAX_MATCH) {\n fill_window(s);\n if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* See how many times the previous byte repeats */\n s.match_length = 0;\n if (s.lookahead >= MIN_MATCH && s.strstart > 0) {\n scan = s.strstart - 1;\n prev = _win[scan];\n if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {\n strend = s.strstart + MAX_MATCH;\n do {\n /*jshint noempty:false*/\n } while (prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n scan < strend);\n s.match_length = MAX_MATCH - (strend - scan);\n if (s.match_length > s.lookahead) {\n s.match_length = s.lookahead;\n }\n }\n //Assert(scan <= s->window+(uInt)(s->window_size-1), \"wild scan\");\n }\n\n /* Emit match if have run of MIN_MATCH or longer, else emit literal */\n if (s.match_length >= MIN_MATCH) {\n //check_match(s, s.strstart, s.strstart - 1, s.match_length);\n\n /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/\n bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH);\n\n s.lookahead -= s.match_length;\n s.strstart += s.match_length;\n s.match_length = 0;\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* ===========================================================================\n * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.\n * (It will be regenerated if this run of deflate switches away from Huffman.)\n */\nfunction deflate_huff(s, flush) {\n var bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we have a literal to write. 
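\n *\n * (Aside: like deflate_rle above, this Huffman-only path is chosen by the\n * strategy check in deflate() rather than by the compression level. With the\n * high-level wrapper that would look roughly like\n *\n *   var d = new pako.Deflate({ strategy: 2 }); // 2 is assumed here to be Z_HUFFMAN_ONLY\n *\n * No hash table is maintained, so only literal bytes are tallied.)\n 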
*/\n if (s.lookahead === 0) {\n fill_window(s);\n if (s.lookahead === 0) {\n if (flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n break; /* flush the current block */\n }\n }\n\n /* Output a literal byte */\n s.match_length = 0;\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = trees._tr_tally(s, 0, s.window[s.strstart]);\n s.lookahead--;\n s.strstart++;\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n}\n\n/* Values for max_lazy_match, good_match and max_chain_length, depending on\n * the desired pack level (0..9). The values given below have been tuned to\n * exclude worst case performance for pathological files. Better values may be\n * found for specific files.\n */\nfunction Config(good_length, max_lazy, nice_length, max_chain, func) {\n this.good_length = good_length;\n this.max_lazy = max_lazy;\n this.nice_length = nice_length;\n this.max_chain = max_chain;\n this.func = func;\n}\n\nvar configuration_table;\n\nconfiguration_table = [\n /* good lazy nice chain */\n new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */\n new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */\n new Config(4, 5, 16, 8, deflate_fast), /* 2 */\n new Config(4, 6, 32, 32, deflate_fast), /* 3 */\n\n new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */\n new Config(8, 16, 32, 32, deflate_slow), /* 5 */\n new Config(8, 16, 128, 128, deflate_slow), /* 6 */\n new Config(8, 32, 128, 256, deflate_slow), /* 7 */\n new Config(32, 128, 258, 1024, deflate_slow), /* 8 */\n new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */\n];\n\n\n/* ===========================================================================\n * Initialize the \"longest match\" routines for a new zlib stream\n */\nfunction lm_init(s) {\n s.window_size = 2 * s.w_size;\n\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n\n /* Set the default configuration parameters:\n */\n s.max_lazy_match = configuration_table[s.level].max_lazy;\n s.good_match = configuration_table[s.level].good_length;\n s.nice_match = configuration_table[s.level].nice_length;\n s.max_chain_length = configuration_table[s.level].max_chain;\n\n s.strstart = 0;\n s.block_start = 0;\n s.lookahead = 0;\n s.insert = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n s.ins_h = 0;\n}\n\n\nfunction DeflateState() {\n this.strm = null; /* pointer back to this zlib stream */\n this.status = 0; /* as the name implies */\n this.pending_buf = null; /* output still pending */\n this.pending_buf_size = 0; /* size of pending_buf */\n this.pending_out = 0; /* next pending byte to output to the stream */\n this.pending = 0; /* nb of bytes in the pending buffer */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.gzhead = null; /* gzip header information to write */\n this.gzindex = 0; /* where in extra, name, or comment */\n this.method = Z_DEFLATED; /* can only be DEFLATED */\n this.last_flush = -1; /* value of flush param for 
previous deflate call */\n\n this.w_size = 0; /* LZ77 window size (32K by default) */\n this.w_bits = 0; /* log2(w_size) (8..16) */\n this.w_mask = 0; /* w_size - 1 */\n\n this.window = null;\n /* Sliding window. Input bytes are read into the second half of the window,\n * and move to the first half later to keep a dictionary of at least wSize\n * bytes. With this organization, matches are limited to a distance of\n * wSize-MAX_MATCH bytes, but this ensures that IO is always\n * performed with a length multiple of the block size.\n */\n\n this.window_size = 0;\n /* Actual size of window: 2*wSize, except when the user input buffer\n * is directly used as sliding window.\n */\n\n this.prev = null;\n /* Link to older string with same hash index. To limit the size of this\n * array to 64K, this link is maintained only for the last 32K strings.\n * An index in this array is thus a window index modulo 32K.\n */\n\n this.head = null; /* Heads of the hash chains or NIL. */\n\n this.ins_h = 0; /* hash index of string to be inserted */\n this.hash_size = 0; /* number of elements in hash table */\n this.hash_bits = 0; /* log2(hash_size) */\n this.hash_mask = 0; /* hash_size-1 */\n\n this.hash_shift = 0;\n /* Number of bits by which ins_h must be shifted at each input\n * step. It must be such that after MIN_MATCH steps, the oldest\n * byte no longer takes part in the hash key, that is:\n * hash_shift * MIN_MATCH >= hash_bits\n */\n\n this.block_start = 0;\n /* Window position at the beginning of the current output block. Gets\n * negative when the window is moved backwards.\n */\n\n this.match_length = 0; /* length of best match */\n this.prev_match = 0; /* previous match */\n this.match_available = 0; /* set if previous match exists */\n this.strstart = 0; /* start of string to insert */\n this.match_start = 0; /* start of matching string */\n this.lookahead = 0; /* number of valid bytes ahead in window */\n\n this.prev_length = 0;\n /* Length of the best match at previous step. Matches not greater than this\n * are discarded. This is used in the lazy match evaluation.\n */\n\n this.max_chain_length = 0;\n /* To speed up deflation, hash chains are never searched beyond this\n * length. A higher limit improves compression ratio but degrades the\n * speed.\n */\n\n this.max_lazy_match = 0;\n /* Attempt to find a better match only when the current match is strictly\n * smaller than this value. This mechanism is used only for compression\n * levels >= 4.\n */\n // That's alias to max_lazy_match, don't use directly\n //this.max_insert_length = 0;\n /* Insert new strings in the hash table only if the match length is not\n * greater than this length. 
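\n * (For concreteness: lm_init() copies this field from configuration_table, so\n * level 1 gets max_lazy 4 and runs deflate_fast, where the value acts as\n * max_insert_length, while level 6 gets 16 and level 9 gets 258 and run\n * deflate_slow, where it bounds the lazy-match search.)\n * 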
This saves time but degrades compression.\n * max_insert_length is used only for compression levels <= 3.\n */\n\n this.level = 0; /* compression level (1..9) */\n this.strategy = 0; /* favor or force Huffman coding*/\n\n this.good_match = 0;\n /* Use a faster search when the previous match is longer than this */\n\n this.nice_match = 0; /* Stop searching when current match exceeds this */\n\n /* used by trees.c: */\n\n /* Didn't use ct_data typedef below to suppress compiler warning */\n\n // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */\n // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */\n // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */\n\n // Use flat array of DOUBLE size, with interleaved fata,\n // because JS does not support effective\n this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2);\n this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2);\n this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2);\n zero(this.dyn_ltree);\n zero(this.dyn_dtree);\n zero(this.bl_tree);\n\n this.l_desc = null; /* desc. for literal tree */\n this.d_desc = null; /* desc. for distance tree */\n this.bl_desc = null; /* desc. for bit length tree */\n\n //ush bl_count[MAX_BITS+1];\n this.bl_count = new utils.Buf16(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */\n this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */\n zero(this.heap);\n\n this.heap_len = 0; /* number of elements in the heap */\n this.heap_max = 0; /* element of largest frequency */\n /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.\n * The same heap array is used to build all trees.\n */\n\n this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1];\n zero(this.depth);\n /* Depth of each subtree used as tie breaker for trees of equal frequency\n */\n\n this.l_buf = 0; /* buffer index for literals or lengths */\n\n this.lit_bufsize = 0;\n /* Size of match buffer for literals/lengths. There are 4 reasons for\n * limiting lit_bufsize to 64K:\n * - frequencies can be kept in 16 bit counters\n * - if compression is not successful for the first block, all input\n * data is still in the window so we can still emit a stored block even\n * when input comes from standard input. (This can also be done for\n * all blocks if lit_bufsize is not greater than 32K.)\n * - if compression is not successful for a file smaller than 64K, we can\n * even emit a stored file instead of a stored block (saving 5 bytes).\n * This is applicable only for zip (not gzip or zlib).\n * - creating new Huffman trees less frequently may not provide fast\n * adaptation to changes in the input data statistics. (Take for\n * example a binary file with poorly compressible code followed by\n * a highly compressible string table.) Smaller buffer sizes give\n * fast adaptation but have of course the overhead of transmitting\n * trees more frequently.\n * - I can't count above 4\n */\n\n this.last_lit = 0; /* running index in l_buf */\n\n this.d_buf = 0;\n /* Buffer index for distances. To simplify the code, d_buf and l_buf have\n * the same number of elements. 
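\n * (Sketch of the resulting layout, assuming the default memLevel of 8:\n * deflateInit2() below sets lit_bufsize = 1 << (8 + 6) = 16384 and\n * pending_buf_size = 4 * 16384; d_buf then points at byte offset 16384,\n * where the 16-bit distances are stored, and l_buf at offset 3 * 16384,\n * where the literal/length bytes go.)\n * 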
To use different lengths, an extra flag\n * array would be necessary.\n */\n\n this.opt_len = 0; /* bit length of current block with optimal trees */\n this.static_len = 0; /* bit length of current block with static trees */\n this.matches = 0; /* number of string matches in current block */\n this.insert = 0; /* bytes at end of window left to insert */\n\n\n this.bi_buf = 0;\n /* Output buffer. bits are inserted starting at the bottom (least\n * significant bits).\n */\n this.bi_valid = 0;\n /* Number of valid bits in bi_buf. All bits above the last valid bit\n * are always zero.\n */\n\n // Used for window memory init. We safely ignore it for JS. That makes\n // sense only for pointers and memory check tools.\n //this.high_water = 0;\n /* High water mark offset in window for initialized bytes -- bytes above\n * this are set to zero in order to avoid memory check warnings when\n * longest match routines access bytes past the input. This is then\n * updated to the new high water mark.\n */\n}\n\n\nfunction deflateResetKeep(strm) {\n var s;\n\n if (!strm || !strm.state) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.total_in = strm.total_out = 0;\n strm.data_type = Z_UNKNOWN;\n\n s = strm.state;\n s.pending = 0;\n s.pending_out = 0;\n\n if (s.wrap < 0) {\n s.wrap = -s.wrap;\n /* was made negative by deflate(..., Z_FINISH); */\n }\n s.status = (s.wrap ? INIT_STATE : BUSY_STATE);\n strm.adler = (s.wrap === 2) ?\n 0 // crc32(0, Z_NULL, 0)\n :\n 1; // adler32(0, Z_NULL, 0)\n s.last_flush = Z_NO_FLUSH;\n trees._tr_init(s);\n return Z_OK;\n}\n\n\nfunction deflateReset(strm) {\n var ret = deflateResetKeep(strm);\n if (ret === Z_OK) {\n lm_init(strm.state);\n }\n return ret;\n}\n\n\nfunction deflateSetHeader(strm, head) {\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; }\n strm.state.gzhead = head;\n return Z_OK;\n}\n\n\nfunction deflateInit2(strm, level, method, windowBits, memLevel, strategy) {\n if (!strm) { // === Z_NULL\n return Z_STREAM_ERROR;\n }\n var wrap = 1;\n\n if (level === Z_DEFAULT_COMPRESSION) {\n level = 6;\n }\n\n if (windowBits < 0) { /* suppress zlib wrapper */\n wrap = 0;\n windowBits = -windowBits;\n }\n\n else if (windowBits > 15) {\n wrap = 2; /* write gzip wrapper instead */\n windowBits -= 16;\n }\n\n\n if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED ||\n windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||\n strategy < 0 || strategy > Z_FIXED) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n\n if (windowBits === 8) {\n windowBits = 9;\n }\n /* until 256-byte window bug fixed */\n\n var s = new DeflateState();\n\n strm.state = s;\n s.strm = strm;\n\n s.wrap = wrap;\n s.gzhead = null;\n s.w_bits = windowBits;\n s.w_size = 1 << s.w_bits;\n s.w_mask = s.w_size - 1;\n\n s.hash_bits = memLevel + 7;\n s.hash_size = 1 << s.hash_bits;\n s.hash_mask = s.hash_size - 1;\n s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);\n\n s.window = new utils.Buf8(s.w_size * 2);\n s.head = new utils.Buf16(s.hash_size);\n s.prev = new utils.Buf16(s.w_size);\n\n // Don't need mem init magic for JS.\n //s.high_water = 0; /* nothing written to s->window yet */\n\n s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */\n\n s.pending_buf_size = s.lit_bufsize * 4;\n\n //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);\n //s->pending_buf = (uchf *) overlay;\n s.pending_buf = new utils.Buf8(s.pending_buf_size);\n\n // It is offset from `s.pending_buf` (size is `s.lit_bufsize 
* 2`)\n //s->d_buf = overlay + s->lit_bufsize/sizeof(ush);\n s.d_buf = 1 * s.lit_bufsize;\n\n //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;\n s.l_buf = (1 + 2) * s.lit_bufsize;\n\n s.level = level;\n s.strategy = strategy;\n s.method = method;\n\n return deflateReset(strm);\n}\n\nfunction deflateInit(strm, level) {\n return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);\n}\n\n\nfunction deflate(strm, flush) {\n var old_flush, s;\n var beg, val; // for gzip header write only\n\n if (!strm || !strm.state ||\n flush > Z_BLOCK || flush < 0) {\n return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;\n }\n\n s = strm.state;\n\n if (!strm.output ||\n (!strm.input && strm.avail_in !== 0) ||\n (s.status === FINISH_STATE && flush !== Z_FINISH)) {\n return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);\n }\n\n s.strm = strm; /* just in case */\n old_flush = s.last_flush;\n s.last_flush = flush;\n\n /* Write the header */\n if (s.status === INIT_STATE) {\n\n if (s.wrap === 2) { // GZIP header\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n put_byte(s, 31);\n put_byte(s, 139);\n put_byte(s, 8);\n if (!s.gzhead) { // s->gzhead == Z_NULL\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, s.level === 9 ? 2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, OS_CODE);\n s.status = BUSY_STATE;\n }\n else {\n put_byte(s, (s.gzhead.text ? 1 : 0) +\n (s.gzhead.hcrc ? 2 : 0) +\n (!s.gzhead.extra ? 0 : 4) +\n (!s.gzhead.name ? 0 : 8) +\n (!s.gzhead.comment ? 0 : 16)\n );\n put_byte(s, s.gzhead.time & 0xff);\n put_byte(s, (s.gzhead.time >> 8) & 0xff);\n put_byte(s, (s.gzhead.time >> 16) & 0xff);\n put_byte(s, (s.gzhead.time >> 24) & 0xff);\n put_byte(s, s.level === 9 ? 
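/* XFL byte (RFC 1952): 2 marks maximum compression, 4 marks fastest; the bytes written above are ID1=31, ID2=139, CM=8 (deflate), FLG, the 4-byte MTIME, and after XFL comes the OS code. */ 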
2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, s.gzhead.os & 0xff);\n if (s.gzhead.extra && s.gzhead.extra.length) {\n put_byte(s, s.gzhead.extra.length & 0xff);\n put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);\n }\n if (s.gzhead.hcrc) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0);\n }\n s.gzindex = 0;\n s.status = EXTRA_STATE;\n }\n }\n else // DEFLATE header\n {\n var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8;\n var level_flags = -1;\n\n if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {\n level_flags = 0;\n } else if (s.level < 6) {\n level_flags = 1;\n } else if (s.level === 6) {\n level_flags = 2;\n } else {\n level_flags = 3;\n }\n header |= (level_flags << 6);\n if (s.strstart !== 0) { header |= PRESET_DICT; }\n header += 31 - (header % 31);\n\n s.status = BUSY_STATE;\n putShortMSB(s, header);\n\n /* Save the adler32 of the preset dictionary: */\n if (s.strstart !== 0) {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n strm.adler = 1; // adler32(0L, Z_NULL, 0);\n }\n }\n\n//#ifdef GZIP\n if (s.status === EXTRA_STATE) {\n if (s.gzhead.extra/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n\n while (s.gzindex < (s.gzhead.extra.length & 0xffff)) {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n break;\n }\n }\n put_byte(s, s.gzhead.extra[s.gzindex] & 0xff);\n s.gzindex++;\n }\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (s.gzindex === s.gzhead.extra.length) {\n s.gzindex = 0;\n s.status = NAME_STATE;\n }\n }\n else {\n s.status = NAME_STATE;\n }\n }\n if (s.status === NAME_STATE) {\n if (s.gzhead.name/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.name.length) {\n val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val === 0) {\n s.gzindex = 0;\n s.status = COMMENT_STATE;\n }\n }\n else {\n s.status = COMMENT_STATE;\n }\n }\n if (s.status === COMMENT_STATE) {\n if (s.gzhead.comment/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.comment.length) {\n val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val 
=== 0) {\n s.status = HCRC_STATE;\n }\n }\n else {\n s.status = HCRC_STATE;\n }\n }\n if (s.status === HCRC_STATE) {\n if (s.gzhead.hcrc) {\n if (s.pending + 2 > s.pending_buf_size) {\n flush_pending(strm);\n }\n if (s.pending + 2 <= s.pending_buf_size) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n s.status = BUSY_STATE;\n }\n }\n else {\n s.status = BUSY_STATE;\n }\n }\n//#endif\n\n /* Flush as much pending output as possible */\n if (s.pending !== 0) {\n flush_pending(strm);\n if (strm.avail_out === 0) {\n /* Since avail_out is 0, deflate will be called again with\n * more output space, but possibly with both pending and\n * avail_in equal to zero. There won't be anything to do,\n * but this is not an error situation so make sure we\n * return OK instead of BUF_ERROR at next call of deflate:\n */\n s.last_flush = -1;\n return Z_OK;\n }\n\n /* Make sure there is something to do and avoid duplicate consecutive\n * flushes. For repeated and useless calls with Z_FINISH, we keep\n * returning Z_STREAM_END instead of Z_BUF_ERROR.\n */\n } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&\n flush !== Z_FINISH) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* User must not provide more input after the first FINISH: */\n if (s.status === FINISH_STATE && strm.avail_in !== 0) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* Start a new block or continue the current one.\n */\n if (strm.avail_in !== 0 || s.lookahead !== 0 ||\n (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) {\n var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) :\n (s.strategy === Z_RLE ? deflate_rle(s, flush) :\n configuration_table[s.level].func(s, flush));\n\n if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {\n s.status = FINISH_STATE;\n }\n if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {\n if (strm.avail_out === 0) {\n s.last_flush = -1;\n /* avoid BUF_ERROR next call, see above */\n }\n return Z_OK;\n /* If flush != Z_NO_FLUSH && avail_out == 0, the next call\n * of deflate should use the same flush parameter to make sure\n * that the flush is complete. So we don't have to output an\n * empty block here, this will be done at next call. 
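\n * (Aside: for Z_SYNC_FLUSH and Z_FULL_FLUSH the branch below emits an empty\n * stored block, which aligns the output on a byte boundary and ends with the\n * familiar 00 00 ff ff marker; Z_FULL_FLUSH additionally clears the hash\n * table so decompression can restart from that point. With the wrapper,\n * deflator.push(chunk, 2) requests a sync flush.)\n * 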
This also\n * ensures that for a very small output buffer, we emit at most\n * one empty block.\n */\n }\n if (bstate === BS_BLOCK_DONE) {\n if (flush === Z_PARTIAL_FLUSH) {\n trees._tr_align(s);\n }\n else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */\n\n trees._tr_stored_block(s, 0, 0, false);\n /* For a full flush, this empty block will be recognized\n * as a special marker by inflate_sync().\n */\n if (flush === Z_FULL_FLUSH) {\n /*** CLEAR_HASH(s); ***/ /* forget history */\n zero(s.head); // Fill with NIL (= 0);\n\n if (s.lookahead === 0) {\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n }\n }\n flush_pending(strm);\n if (strm.avail_out === 0) {\n s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */\n return Z_OK;\n }\n }\n }\n //Assert(strm->avail_out > 0, \"bug2\");\n //if (strm.avail_out <= 0) { throw new Error(\"bug2\");}\n\n if (flush !== Z_FINISH) { return Z_OK; }\n if (s.wrap <= 0) { return Z_STREAM_END; }\n\n /* Write the trailer */\n if (s.wrap === 2) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n put_byte(s, (strm.adler >> 16) & 0xff);\n put_byte(s, (strm.adler >> 24) & 0xff);\n put_byte(s, strm.total_in & 0xff);\n put_byte(s, (strm.total_in >> 8) & 0xff);\n put_byte(s, (strm.total_in >> 16) & 0xff);\n put_byte(s, (strm.total_in >> 24) & 0xff);\n }\n else\n {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n\n flush_pending(strm);\n /* If avail_out is zero, the application will call deflate again\n * to flush the rest.\n */\n if (s.wrap > 0) { s.wrap = -s.wrap; }\n /* write the trailer only once! */\n return s.pending !== 0 ? Z_OK : Z_STREAM_END;\n}\n\nfunction deflateEnd(strm) {\n var status;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n status = strm.state.status;\n if (status !== INIT_STATE &&\n status !== EXTRA_STATE &&\n status !== NAME_STATE &&\n status !== COMMENT_STATE &&\n status !== HCRC_STATE &&\n status !== BUSY_STATE &&\n status !== FINISH_STATE\n ) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.state = null;\n\n return status === BUSY_STATE ? 
err(strm, Z_DATA_ERROR) : Z_OK;\n}\n\n\n/* =========================================================================\n * Initializes the compression dictionary from the given byte\n * sequence without producing any compressed output.\n */\nfunction deflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var s;\n var str, n;\n var wrap;\n var avail;\n var next;\n var input;\n var tmpDict;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n s = strm.state;\n wrap = s.wrap;\n\n if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {\n return Z_STREAM_ERROR;\n }\n\n /* when using zlib wrappers, compute Adler-32 for provided dictionary */\n if (wrap === 1) {\n /* adler32(strm->adler, dictionary, dictLength); */\n strm.adler = adler32(strm.adler, dictionary, dictLength, 0);\n }\n\n s.wrap = 0; /* avoid computing Adler-32 in read_buf */\n\n /* if dictionary would fill window, just replace the history */\n if (dictLength >= s.w_size) {\n if (wrap === 0) { /* already empty otherwise */\n /*** CLEAR_HASH(s); ***/\n zero(s.head); // Fill with NIL (= 0);\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n /* use the tail */\n // dictionary = dictionary.slice(dictLength - s.w_size);\n tmpDict = new utils.Buf8(s.w_size);\n utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0);\n dictionary = tmpDict;\n dictLength = s.w_size;\n }\n /* insert dictionary into window and hash */\n avail = strm.avail_in;\n next = strm.next_in;\n input = strm.input;\n strm.avail_in = dictLength;\n strm.next_in = 0;\n strm.input = dictionary;\n fill_window(s);\n while (s.lookahead >= MIN_MATCH) {\n str = s.strstart;\n n = s.lookahead - (MIN_MATCH - 1);\n do {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n\n s.head[s.ins_h] = str;\n str++;\n } while (--n);\n s.strstart = str;\n s.lookahead = MIN_MATCH - 1;\n fill_window(s);\n }\n s.strstart += s.lookahead;\n s.block_start = s.strstart;\n s.insert = s.lookahead;\n s.lookahead = 0;\n s.match_length = s.prev_length = MIN_MATCH - 1;\n s.match_available = 0;\n strm.next_in = next;\n strm.input = input;\n strm.avail_in = avail;\n s.wrap = wrap;\n return Z_OK;\n}\n\n\nexports.deflateInit = deflateInit;\nexports.deflateInit2 = deflateInit2;\nexports.deflateReset = deflateReset;\nexports.deflateResetKeep = deflateResetKeep;\nexports.deflateSetHeader = deflateSetHeader;\nexports.deflate = deflate;\nexports.deflateEnd = deflateEnd;\nexports.deflateSetDictionary = deflateSetDictionary;\nexports.deflateInfo = 'pako deflate (from Nodeca project)';\n\n/* Not implemented\nexports.deflateBound = deflateBound;\nexports.deflateCopy = deflateCopy;\nexports.deflateParams = deflateParams;\nexports.deflatePending = deflatePending;\nexports.deflatePrime = deflatePrime;\nexports.deflateTune = deflateTune;\n*/\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. 
The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nmodule.exports = {\n 2: 'need dictionary', /* Z_NEED_DICT 2 */\n 1: 'stream end', /* Z_STREAM_END 1 */\n 0: '', /* Z_OK 0 */\n '-1': 'file error', /* Z_ERRNO (-1) */\n '-2': 'stream error', /* Z_STREAM_ERROR (-2) */\n '-3': 'data error', /* Z_DATA_ERROR (-3) */\n '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */\n '-5': 'buffer error', /* Z_BUF_ERROR (-5) */\n '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */\n};\n","// String encode/decode helpers\n'use strict';\n\n\nvar utils = require('./common');\n\n\n// Quick check if we can use fast array to bin string conversion\n//\n// - apply(Array) can fail on Android 2.2\n// - apply(Uint8Array) can fail on iOS 5.1 Safari\n//\nvar STR_APPLY_OK = true;\nvar STR_APPLY_UIA_OK = true;\n\ntry { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; }\ntry { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }\n\n\n// Table with utf8 lengths (calculated by first byte of sequence)\n// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,\n// because max possible codepoint is 0x10ffff\nvar _utf8len = new utils.Buf8(256);\nfor (var q = 0; q < 256; q++) {\n _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);\n}\n_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start\n\n\n// convert string to array (typed, when possible)\nexports.string2buf = function (str) {\n var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;\n\n // count binary size\n for (m_pos = 0; m_pos < str_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
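/* e.g. U+00E9 'é' needs 2 bytes (0xC3 0xA9), U+20AC '€' needs 3 (0xE2 0x82 0xAC), and U+1F600 -- a surrogate pair in JS, recombined above -- needs 4 (0xF0 0x9F 0x98 0x80) */ 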
3 : 4;\n }\n\n // allocate buffer\n buf = new utils.Buf8(buf_len);\n\n // convert\n for (i = 0, m_pos = 0; i < buf_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n if (c < 0x80) {\n /* one byte */\n buf[i++] = c;\n } else if (c < 0x800) {\n /* two bytes */\n buf[i++] = 0xC0 | (c >>> 6);\n buf[i++] = 0x80 | (c & 0x3f);\n } else if (c < 0x10000) {\n /* three bytes */\n buf[i++] = 0xE0 | (c >>> 12);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n } else {\n /* four bytes */\n buf[i++] = 0xf0 | (c >>> 18);\n buf[i++] = 0x80 | (c >>> 12 & 0x3f);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n }\n }\n\n return buf;\n};\n\n// Helper (used in 2 places)\nfunction buf2binstring(buf, len) {\n // On Chrome, the arguments in a function call that are allowed is `65534`.\n // If the length of the buffer is smaller than that, we can use this optimization,\n // otherwise we will take a slower path.\n if (len < 65534) {\n if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) {\n return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));\n }\n }\n\n var result = '';\n for (var i = 0; i < len; i++) {\n result += String.fromCharCode(buf[i]);\n }\n return result;\n}\n\n\n// Convert byte array to binary string\nexports.buf2binstring = function (buf) {\n return buf2binstring(buf, buf.length);\n};\n\n\n// Convert binary string (typed, when possible)\nexports.binstring2buf = function (str) {\n var buf = new utils.Buf8(str.length);\n for (var i = 0, len = buf.length; i < len; i++) {\n buf[i] = str.charCodeAt(i);\n }\n return buf;\n};\n\n\n// convert array to string\nexports.buf2string = function (buf, max) {\n var i, out, c, c_len;\n var len = max || buf.length;\n\n // Reserve max possible length (2 words per char)\n // NB: by unknown reasons, Array is significantly faster for\n // String.fromCharCode.apply than Uint16Array.\n var utf16buf = new Array(len * 2);\n\n for (out = 0, i = 0; i < len;) {\n c = buf[i++];\n // quick process ascii\n if (c < 0x80) { utf16buf[out++] = c; continue; }\n\n c_len = _utf8len[c];\n // skip 5 & 6 byte codes\n if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }\n\n // apply mask on first byte\n c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;\n // join the rest\n while (c_len > 1 && i < len) {\n c = (c << 6) | (buf[i++] & 0x3f);\n c_len--;\n }\n\n // terminated by end of string?\n if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }\n\n if (c < 0x10000) {\n utf16buf[out++] = c;\n } else {\n c -= 0x10000;\n utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);\n utf16buf[out++] = 0xdc00 | (c & 0x3ff);\n }\n }\n\n return buf2binstring(utf16buf, out);\n};\n\n\n// Calculate max possible position in utf8 buffer,\n// that will not break sequence. 
If that's not possible\n// - (very small limits) return max size as is.\n//\n// buf[] - utf8 bytes array\n// max - length limit (mandatory);\nexports.utf8border = function (buf, max) {\n var pos;\n\n max = max || buf.length;\n if (max > buf.length) { max = buf.length; }\n\n // go back from last position, until start of sequence found\n pos = max - 1;\n while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }\n\n // Very small and broken sequence,\n // return max, because we should return something anyway.\n if (pos < 0) { return max; }\n\n // If we came to start of buffer - that means buffer is too small,\n // return max too.\n if (pos === 0) { return max; }\n\n return (pos + _utf8len[buf[pos]] > max) ? pos : max;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nfunction ZStream() {\n /* next input byte */\n this.input = null; // JS specific, because we have no pointers\n this.next_in = 0;\n /* number of bytes available at input */\n this.avail_in = 0;\n /* total number of input bytes read so far */\n this.total_in = 0;\n /* next output byte should be put there */\n this.output = null; // JS specific, because we have no pointers\n this.next_out = 0;\n /* remaining free space at output */\n this.avail_out = 0;\n /* total number of bytes output so far */\n this.total_out = 0;\n /* last error message, NULL if no error */\n this.msg = ''/*Z_NULL*/;\n /* not visible by applications */\n this.state = null;\n /* best guess about the data type: binary or text */\n this.data_type = 2/*Z_UNKNOWN*/;\n /* adler32 value of the uncompressed data */\n this.adler = 0;\n}\n\nmodule.exports = ZStream;\n","'use strict';\n\n\nvar zlib_deflate = require('./zlib/deflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\n\nvar toString = Object.prototype.toString;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nvar Z_NO_FLUSH = 0;\nvar Z_FINISH = 4;\n\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_SYNC_FLUSH = 2;\n\nvar Z_DEFAULT_COMPRESSION = -1;\n\nvar Z_DEFAULT_STRATEGY = 0;\n\nvar Z_DEFLATED = 8;\n\n/* ===========================================================================*/\n\n\n/**\n * class Deflate\n *\n * Generic JS-style wrapper for zlib calls. 
If you don't need\n * streaming behaviour - use more simple functions: [[deflate]],\n * [[deflateRaw]] and [[gzip]].\n **/\n\n/* internal\n * Deflate.chunks -> Array\n *\n * Chunks of output data, if [[Deflate#onData]] not overridden.\n **/\n\n/**\n * Deflate.result -> Uint8Array|Array\n *\n * Compressed result, generated by default [[Deflate#onData]]\n * and [[Deflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Deflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Deflate.err -> Number\n *\n * Error code after deflate finished. 0 (Z_OK) on success.\n * You will not need it in real life, because deflate errors\n * are possible only on wrong options or bad `onData` / `onEnd`\n * custom handlers.\n **/\n\n/**\n * Deflate.msg -> String\n *\n * Error message, if [[Deflate.err]] != 0\n **/\n\n\n/**\n * new Deflate(options)\n * - options (Object): zlib deflate options.\n *\n * Creates new deflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `level`\n * - `windowBits`\n * - `memLevel`\n * - `strategy`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw deflate\n * - `gzip` (Boolean) - create gzip wrapper\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n * - `header` (Object) - custom header for gzip\n * - `text` (Boolean) - true if compressed data believed to be text\n * - `time` (Number) - modification time, unix timestamp\n * - `os` (Number) - operation system code\n * - `extra` (Array) - array of bytes with extra data (max 65536)\n * - `name` (String) - file name (binary string)\n * - `comment` (String) - comment (binary string)\n * - `hcrc` (Boolean) - true if header crc should be added\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var deflate = new pako.Deflate({ level: 3});\n *\n * deflate.push(chunk1, false);\n * deflate.push(chunk2, true); // true -> last chunk\n *\n * if (deflate.err) { throw new Error(deflate.err); }\n *\n * console.log(deflate.result);\n * ```\n **/\nfunction Deflate(options) {\n if (!(this instanceof Deflate)) return new Deflate(options);\n\n this.options = utils.assign({\n level: Z_DEFAULT_COMPRESSION,\n method: Z_DEFLATED,\n chunkSize: 16384,\n windowBits: 15,\n memLevel: 8,\n strategy: Z_DEFAULT_STRATEGY,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n if (opt.raw && (opt.windowBits > 0)) {\n opt.windowBits = -opt.windowBits;\n }\n\n else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {\n opt.windowBits += 16;\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_deflate.deflateInit2(\n this.strm,\n opt.level,\n opt.method,\n opt.windowBits,\n opt.memLevel,\n opt.strategy\n );\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n if (opt.header) {\n 
zlib_deflate.deflateSetHeader(this.strm, opt.header);\n }\n\n if (opt.dictionary) {\n var dict;\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n // If we need to compress text, change encoding to utf8.\n dict = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(opt.dictionary);\n } else {\n dict = opt.dictionary;\n }\n\n status = zlib_deflate.deflateSetDictionary(this.strm, dict);\n\n if (status !== Z_OK) {\n throw new Error(msg[status]);\n }\n\n this._dict_set = true;\n }\n}\n\n/**\n * Deflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be\n * converted to utf8 byte sequence.\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.\n *\n * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with\n * new compressed chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the compression context.\n *\n * On fail call [[Deflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * array format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nDeflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var status, _mode;\n\n if (this.ended) { return false; }\n\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? 
Z_FINISH : Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // If we need to compress text, change encoding to utf8.\n strm.input = strings.string2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n status = zlib_deflate.deflate(strm, _mode); /* no bad return value */\n\n if (status !== Z_STREAM_END && status !== Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) {\n if (this.options.to === 'string') {\n this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out)));\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END);\n\n // Finalize on the last chunk.\n if (_mode === Z_FINISH) {\n status = zlib_deflate.deflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === Z_SYNC_FLUSH) {\n this.onEnd(Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Deflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): output data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nDeflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Deflate#onEnd(status) -> Void\n * - status (Number): deflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called once after you tell deflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. 
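\n *\n * ##### Example (illustrative sketch; `writeSomewhere`, `part1` and `part2` are placeholders)\n *\n * ```javascript\n * var deflator = new pako.Deflate({ gzip: true, level: 6 });\n *\n * // stream compressed chunks out instead of collecting them in memory\n * deflator.onData = function (chunk) { writeSomewhere(chunk); };\n *\n * deflator.push(part1, false);\n * deflator.push(part2, true); // true -> last chunk\n *\n * if (deflator.err) { throw new Error(deflator.msg); }\n * ```\n *\n * 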
By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nDeflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === Z_OK) {\n if (this.options.to === 'string') {\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * deflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * Compress `data` with deflate algorithm and `options`.\n *\n * Supported options are:\n *\n * - level\n * - windowBits\n * - memLevel\n * - strategy\n * - dictionary\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be \"binary string\"\n * (each char code [0..255])\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , data = Uint8Array([1,2,3,4,5,6,7,8,9]);\n *\n * console.log(pako.deflate(data));\n * ```\n **/\nfunction deflate(input, options) {\n var deflator = new Deflate(options);\n\n deflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (deflator.err) { throw deflator.msg || msg[deflator.err]; }\n\n return deflator.result;\n}\n\n\n/**\n * deflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction deflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return deflate(input, options);\n}\n\n\n/**\n * gzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but create gzip wrapper instead of\n * deflate one.\n **/\nfunction gzip(input, options) {\n options = options || {};\n options.gzip = true;\n return deflate(input, options);\n}\n\n\nexports.Deflate = Deflate;\nexports.deflate = deflate;\nexports.deflateRaw = deflateRaw;\nexports.gzip = gzip;\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\n// See state defs from inflate.js\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\n\n/*\n Decode literal, length, and distance codes and write out the resulting\n literal and match bytes until either not enough input or output is\n available, an end-of-block is encountered, or a data error is encountered.\n When large enough input and output buffers are supplied to inflate(), for\n example, a 16K input buffer and a 64K output buffer, more than 95% of the\n inflate execution time is spent in this routine.\n\n Entry assumptions:\n\n state.mode === LEN\n strm.avail_in >= 6\n strm.avail_out >= 258\n start >= strm.avail_out\n state.bits < 8\n\n On return, state.mode is one of:\n\n LEN -- ran out of enough output space or enough available input\n TYPE -- reached end of block code, inflate() to interpret next block\n BAD -- error in block data\n\n Notes:\n\n - The maximum input bits used by a length/distance pair is 15 bits for the\n length code, 5 bits for the length extra, 15 bits for the distance code,\n and 13 bits for the distance extra. This totals 48 bits, or six bytes.\n Therefore if strm.avail_in >= 6, then there is enough input to avoid\n checking for available input while decoding.\n\n - The maximum bytes that a single length/distance pair can output is 258\n bytes, which is the maximum length that can be coded. inflate_fast()\n requires strm.avail_out >= 258 for each loop to avoid checking for\n output space.\n */\nmodule.exports = function inflate_fast(strm, start) {\n var state;\n var _in; /* local strm.input */\n var last; /* have enough input while in < last */\n var _out; /* local strm.output */\n var beg; /* inflate()'s initial strm.output */\n var end; /* while out < end, enough space available */\n//#ifdef INFLATE_STRICT\n var dmax; /* maximum distance from zlib header */\n//#endif\n var wsize; /* window size or zero if not using window */\n var whave; /* valid bytes in the window */\n var wnext; /* window write index */\n // Use `s_window` instead `window`, avoid conflict with instrumentation tools\n var s_window; /* allocated sliding window, if wsize != 0 */\n var hold; /* local strm.hold */\n var bits; /* local strm.bits */\n var lcode; /* local strm.lencode */\n var dcode; /* local strm.distcode */\n var lmask; /* mask for first level of length codes */\n var dmask; /* mask for first level of distance codes */\n var here; /* retrieved table entry */\n var op; /* code bits, operation, extra bits, or */\n /* window position, window bytes to copy */\n var len; /* match length, unused bytes */\n var dist; /* match distance */\n var from; /* where to copy match from */\n var from_source;\n\n\n var input, output; // JS specific, because we have no pointers\n\n /* copy state to local variables */\n state = strm.state;\n //here = state.here;\n _in = strm.next_in;\n input = strm.input;\n last = _in + (strm.avail_in - 5);\n _out = strm.next_out;\n output = strm.output;\n beg = _out - (start - strm.avail_out);\n end = _out + (strm.avail_out - 257);\n//#ifdef INFLATE_STRICT\n dmax = state.dmax;\n//#endif\n wsize = state.wsize;\n whave = state.whave;\n wnext = state.wnext;\n s_window = state.window;\n hold = state.hold;\n bits = state.bits;\n lcode = state.lencode;\n dcode = state.distcode;\n lmask = (1 << state.lenbits) - 1;\n dmask = (1 << state.distbits) - 1;\n\n\n /* decode literals and length/distances until 
end-of-block or not enough\n input data or output space */\n\n top:\n do {\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n\n here = lcode[hold & lmask];\n\n dolen:\n for (;;) { // Goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n if (op === 0) { /* literal */\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n output[_out++] = here & 0xffff/*here.val*/;\n }\n else if (op & 16) { /* length base */\n len = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (op) {\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n len += hold & ((1 << op) - 1);\n hold >>>= op;\n bits -= op;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", len));\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n here = dcode[hold & dmask];\n\n dodist:\n for (;;) { // goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n\n if (op & 16) { /* distance base */\n dist = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n }\n dist += hold & ((1 << op) - 1);\n//#ifdef INFLATE_STRICT\n if (dist > dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n//#endif\n hold >>>= op;\n bits -= op;\n //Tracevv((stderr, \"inflate: distance %u\\n\", dist));\n op = _out - beg; /* max distance in output */\n if (dist > op) { /* see if copy from window */\n op = dist - op; /* distance back in window */\n if (op > whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// if (len <= op - whave) {\n// do {\n// output[_out++] = 0;\n// } while (--len);\n// continue top;\n// }\n// len -= op - whave;\n// do {\n// output[_out++] = 0;\n// } while (--op > whave);\n// if (op === 0) {\n// from = _out - dist;\n// do {\n// output[_out++] = output[from++];\n// } while (--len);\n// continue top;\n// }\n//#endif\n }\n from = 0; // window index\n from_source = s_window;\n if (wnext === 0) { /* very common case */\n from += wsize - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n else if (wnext < op) { /* wrap around window */\n from += wsize + wnext - op;\n op -= wnext;\n if (op < len) { /* some from end of window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = 0;\n if (wnext < len) { /* some from start of window */\n op = wnext;\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n }\n else { /* contiguous in window */\n from += wnext - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n while (len > 2) {\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n len -= 3;\n }\n if (len) {\n output[_out++] = from_source[from++];\n if (len > 1) {\n output[_out++] = from_source[from++];\n }\n }\n }\n else {\n from = _out - dist; /* copy direct from output */\n do { /* minimum length is three */\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n len -= 3;\n } while (len > 2);\n if (len) {\n output[_out++] = output[from++];\n if (len > 1) {\n output[_out++] = output[from++];\n }\n }\n }\n }\n else if ((op & 64) === 0) { /* 2nd level distance code */\n here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dodist;\n }\n else {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n }\n else if ((op & 64) === 0) { /* 2nd level length code */\n here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dolen;\n }\n else if (op & 32) { /* end-of-block */\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.mode = TYPE;\n break top;\n }\n else {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n } while (_in < last && _out < end);\n\n /* return unused bytes (on entry, bits < 8, so in won't go too far back) */\n len = bits >> 3;\n _in -= len;\n bits -= len << 3;\n hold &= (1 << bits) - 1;\n\n /* update state and return */\n strm.next_in = _in;\n strm.next_out = _out;\n strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));\n strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));\n state.hold = hold;\n state.bits = bits;\n return;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. 
In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\n\nvar MAXBITS = 15;\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\nvar lbase = [ /* Length codes 257..285 base */\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,\n 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0\n];\n\nvar lext = [ /* Length codes 257..285 extra */\n 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,\n 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78\n];\n\nvar dbase = [ /* Distance codes 0..29 base */\n 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,\n 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,\n 8193, 12289, 16385, 24577, 0, 0\n];\n\nvar dext = [ /* Distance codes 0..29 extra */\n 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,\n 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,\n 28, 28, 29, 29, 64, 64\n];\n\nmodule.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts)\n{\n var bits = opts.bits;\n //here = opts.here; /* table entry for duplication */\n\n var len = 0; /* a code's length in bits */\n var sym = 0; /* index of code symbols */\n var min = 0, max = 0; /* minimum and maximum code lengths */\n var root = 0; /* number of index bits for root table */\n var curr = 0; /* number of index bits for current table */\n var drop = 0; /* code bits to drop for sub-table */\n var left = 0; /* number of prefix codes available */\n var used = 0; /* code entries in table used */\n var huff = 0; /* Huffman code */\n var incr; /* for incrementing code, index */\n var fill; /* index for replicating entries */\n var low; /* low bits for current root entry */\n var mask; /* mask for low root bits */\n var next; /* next available space in table */\n var base = null; /* base value table to use */\n var base_index = 0;\n// var shoextra; /* extra bits table to use */\n var end; /* use base and extra for symbol > end */\n var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */\n var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */\n var extra = null;\n var extra_index = 0;\n\n var here_bits, here_op, here_val;\n\n /*\n Process a set of code lengths to create a canonical Huffman code. The\n code lengths are lens[0..codes-1]. Each length corresponds to the\n symbols 0..codes-1. The Huffman code is generated by first sorting the\n symbols by length from short to long, and retaining the symbol order\n for codes with equal lengths. 
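// A minimal, self-contained sketch (not from the original pako/zlib source) of
// the canonical code assignment described in this comment. It assumes lens[i]
// is the bit length of symbol i, with 0 meaning the symbol is unused; the
// helper name is hypothetical.
function canonicalCodes(lens) {
  var MAXBITS = 15;
  var count = [];                    // number of codes of each length
  var first = [];                    // first canonical code of each length
  var codes = [];                    // resulting code for each symbol
  var i, len, code;

  for (len = 0; len <= MAXBITS; len++) { count[len] = 0; }
  for (i = 0; i < lens.length; i++) { count[lens[i]]++; }
  count[0] = 0;                      // unused symbols get no codes

  code = 0;
  for (len = 1; len <= MAXBITS; len++) {
    code = (code + count[len - 1]) << 1;   // zeros appended as the length grows
    first[len] = code;
  }

  for (i = 0; i < lens.length; i++) {      // equal lengths keep symbol order
    codes[i] = lens[i] ? first[lens[i]]++ : 0;
  }
  return codes;   // deflate stores these bits LSB-first, which is why the
                  // table-building loop below increments codes "backwards"
}
// (the original explanation resumes below)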
Then the code starts with all zero bits\n for the first code of the shortest length, and the codes are integer\n increments for the same length, and zeros are appended as the length\n increases. For the deflate format, these bits are stored backwards\n from their more natural integer increment ordering, and so when the\n decoding tables are built in the large loop below, the integer codes\n are incremented backwards.\n\n This routine assumes, but does not check, that all of the entries in\n lens[] are in the range 0..MAXBITS. The caller must assure this.\n 1..MAXBITS is interpreted as that code length. zero means that that\n symbol does not occur in this code.\n\n The codes are sorted by computing a count of codes for each length,\n creating from that a table of starting indices for each length in the\n sorted table, and then entering the symbols in order in the sorted\n table. The sorted table is work[], with that space being provided by\n the caller.\n\n The length counts are used for other purposes as well, i.e. finding\n the minimum and maximum length codes, determining if there are any\n codes at all, checking for a valid set of lengths, and looking ahead\n at length counts to determine sub-table sizes when building the\n decoding tables.\n */\n\n /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */\n for (len = 0; len <= MAXBITS; len++) {\n count[len] = 0;\n }\n for (sym = 0; sym < codes; sym++) {\n count[lens[lens_index + sym]]++;\n }\n\n /* bound code lengths, force root to be within code lengths */\n root = bits;\n for (max = MAXBITS; max >= 1; max--) {\n if (count[max] !== 0) { break; }\n }\n if (root > max) {\n root = max;\n }\n if (max === 0) { /* no symbols to code at all */\n //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */\n //table.bits[opts.table_index] = 1; //here.bits = (var char)1;\n //table.val[opts.table_index++] = 0; //here.val = (var short)0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n\n //table.op[opts.table_index] = 64;\n //table.bits[opts.table_index] = 1;\n //table.val[opts.table_index++] = 0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n opts.bits = 1;\n return 0; /* no symbols, but wait for decoding to report error */\n }\n for (min = 1; min < max; min++) {\n if (count[min] !== 0) { break; }\n }\n if (root < min) {\n root = min;\n }\n\n /* check for an over-subscribed or incomplete set of lengths */\n left = 1;\n for (len = 1; len <= MAXBITS; len++) {\n left <<= 1;\n left -= count[len];\n if (left < 0) {\n return -1;\n } /* over-subscribed */\n }\n if (left > 0 && (type === CODES || max !== 1)) {\n return -1; /* incomplete set */\n }\n\n /* generate offsets into symbol table for each length for sorting */\n offs[1] = 0;\n for (len = 1; len < MAXBITS; len++) {\n offs[len + 1] = offs[len] + count[len];\n }\n\n /* sort symbols by length, by symbol order within each length */\n for (sym = 0; sym < codes; sym++) {\n if (lens[lens_index + sym] !== 0) {\n work[offs[lens[lens_index + sym]]++] = sym;\n }\n }\n\n /*\n Create and fill in decoding tables. In this loop, the table being\n filled is at next and has curr index bits. The code being used is huff\n with length len. That code is converted to an index by dropping drop\n bits off of the bottom. For codes where len is less than drop + curr,\n those top drop + curr - len bits are incremented through all values to\n fill the table with replicated entries.\n\n root is the number of index bits for the root table. 
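// Aside (not part of the original source): every entry written into table[]
// below is one 32-bit integer packed as (here_bits << 24) | (here_op << 16) |
// here_val, mirroring zlib's {op, bits, val} struct. A hypothetical helper
// pair, only to make that layout explicit:
function packEntry(bits, op, val) {
  return ((bits << 24) | (op << 16) | val) | 0;
}
function unpackEntry(entry) {
  return {
    bits: entry >>> 24,           // number of code bits this entry consumes
    op: (entry >>> 16) & 0xff,    // how to interpret val (literal, base + extra bits, end-of-block, ...)
    val: entry & 0xffff           // symbol, base value, or sub-table offset
  };
}
// (the original explanation resumes below)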
When len exceeds\n root, sub-tables are created pointed to by the root entry with an index\n of the low root bits of huff. This is saved in low to check for when a\n new sub-table should be started. drop is zero when the root table is\n being filled, and drop is root when sub-tables are being filled.\n\n When a new sub-table is needed, it is necessary to look ahead in the\n code lengths to determine what size sub-table is needed. The length\n counts are used for this, and so count[] is decremented as codes are\n entered in the tables.\n\n used keeps track of how many table entries have been allocated from the\n provided *table space. It is checked for LENS and DIST tables against\n the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in\n the initial root table size constants. See the comments in inftrees.h\n for more information.\n\n sym increments through all symbols, and the loop terminates when\n all codes of length max, i.e. all codes, have been processed. This\n routine permits incomplete codes, so another loop after this one fills\n in the rest of the decoding tables with invalid code markers.\n */\n\n /* set up for code type */\n // poor man optimization - use if-else instead of switch,\n // to avoid deopts in old v8\n if (type === CODES) {\n base = extra = work; /* dummy value--not used */\n end = 19;\n\n } else if (type === LENS) {\n base = lbase;\n base_index -= 257;\n extra = lext;\n extra_index -= 257;\n end = 256;\n\n } else { /* DISTS */\n base = dbase;\n extra = dext;\n end = -1;\n }\n\n /* initialize opts for loop */\n huff = 0; /* starting code */\n sym = 0; /* starting code symbol */\n len = min; /* starting code length */\n next = table_index; /* current table to fill in */\n curr = root; /* current table index bits */\n drop = 0; /* current bits to drop from code for index */\n low = -1; /* trigger new sub-table when len > root */\n used = 1 << root; /* use root table entries */\n mask = used - 1; /* mask for comparing low */\n\n /* check available table space */\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* process all codes and make table entries */\n for (;;) {\n /* create table entry */\n here_bits = len - drop;\n if (work[sym] < end) {\n here_op = 0;\n here_val = work[sym];\n }\n else if (work[sym] > end) {\n here_op = extra[extra_index + work[sym]];\n here_val = base[base_index + work[sym]];\n }\n else {\n here_op = 32 + 64; /* end of block */\n here_val = 0;\n }\n\n /* replicate for those indices with low len bits equal to huff */\n incr = 1 << (len - drop);\n fill = 1 << curr;\n min = fill; /* save offset to next table */\n do {\n fill -= incr;\n table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;\n } while (fill !== 0);\n\n /* backwards increment the len-bit code huff */\n incr = 1 << (len - 1);\n while (huff & incr) {\n incr >>= 1;\n }\n if (incr !== 0) {\n huff &= incr - 1;\n huff += incr;\n } else {\n huff = 0;\n }\n\n /* go to next symbol, update count, len */\n sym++;\n if (--count[len] === 0) {\n if (len === max) { break; }\n len = lens[lens_index + work[sym]];\n }\n\n /* create new sub-table if needed */\n if (len > root && (huff & mask) !== low) {\n /* if first time, transition to sub-tables */\n if (drop === 0) {\n drop = root;\n }\n\n /* increment past last table */\n next += min; /* here min is 1 << curr */\n\n /* determine length of next table */\n curr = len - drop;\n left = 1 << curr;\n while (curr + drop < max) {\n left -= 
count[curr + drop];\n if (left <= 0) { break; }\n curr++;\n left <<= 1;\n }\n\n /* check for enough space */\n used += 1 << curr;\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* point entry in root table to sub-table */\n low = huff & mask;\n /*table.op[low] = curr;\n table.bits[low] = root;\n table.val[low] = next - opts.table_index;*/\n table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;\n }\n }\n\n /* fill in remaining table entry if code is incomplete (guaranteed to have\n at most one remaining entry, since if the code is incomplete, the\n maximum code length that was allowed to get this far is one bit) */\n if (huff !== 0) {\n //table.op[next + huff] = 64; /* invalid code marker */\n //table.bits[next + huff] = len - drop;\n //table.val[next + huff] = 0;\n table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;\n }\n\n /* set return parameters */\n //opts.table_index += used;\n opts.bits = root;\n return 0;\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar utils = require('../utils/common');\nvar adler32 = require('./adler32');\nvar crc32 = require('./crc32');\nvar inflate_fast = require('./inffast');\nvar inflate_table = require('./inftrees');\n\nvar CODES = 0;\nvar LENS = 1;\nvar DISTS = 2;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n/* Allowed flush values; see deflate() and inflate() below for details */\n//var Z_NO_FLUSH = 0;\n//var Z_PARTIAL_FLUSH = 1;\n//var Z_SYNC_FLUSH = 2;\n//var Z_FULL_FLUSH = 3;\nvar Z_FINISH = 4;\nvar Z_BLOCK = 5;\nvar Z_TREES = 6;\n\n\n/* Return codes for the compression/decompression functions. 
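// Aside (not from the original source): a hedged sketch of how a caller
// typically interprets these codes; the numeric values are copied from the
// definitions just below.
function describeInflateReturn(ret) {
  if (ret === 1) { return 'Z_STREAM_END: all compressed data consumed'; }
  if (ret === 0) { return 'Z_OK: progress made, call inflate() again'; }
  if (ret === 2) { return 'Z_NEED_DICT: a preset dictionary is required'; }
  if (ret === -3) { return 'Z_DATA_ERROR: input data is corrupted'; }
  if (ret === -4) { return 'Z_MEM_ERROR: out of memory'; }
  if (ret === -5) { return 'Z_BUF_ERROR: no progress; more input or output space needed'; }
  return 'Z_STREAM_ERROR: inconsistent state or bad arguments';
}
// (the original comment resumes below)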
Negative values\n * are errors, positive values are used for special but normal events.\n */\nvar Z_OK = 0;\nvar Z_STREAM_END = 1;\nvar Z_NEED_DICT = 2;\n//var Z_ERRNO = -1;\nvar Z_STREAM_ERROR = -2;\nvar Z_DATA_ERROR = -3;\nvar Z_MEM_ERROR = -4;\nvar Z_BUF_ERROR = -5;\n//var Z_VERSION_ERROR = -6;\n\n/* The deflate compression method */\nvar Z_DEFLATED = 8;\n\n\n/* STATES ====================================================================*/\n/* ===========================================================================*/\n\n\nvar HEAD = 1; /* i: waiting for magic header */\nvar FLAGS = 2; /* i: waiting for method and flags (gzip) */\nvar TIME = 3; /* i: waiting for modification time (gzip) */\nvar OS = 4; /* i: waiting for extra flags and operating system (gzip) */\nvar EXLEN = 5; /* i: waiting for extra length (gzip) */\nvar EXTRA = 6; /* i: waiting for extra bytes (gzip) */\nvar NAME = 7; /* i: waiting for end of file name (gzip) */\nvar COMMENT = 8; /* i: waiting for end of comment (gzip) */\nvar HCRC = 9; /* i: waiting for header crc (gzip) */\nvar DICTID = 10; /* i: waiting for dictionary check value */\nvar DICT = 11; /* waiting for inflateSetDictionary() call */\nvar TYPE = 12; /* i: waiting for type bits, including last-flag bit */\nvar TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */\nvar STORED = 14; /* i: waiting for stored size (length and complement) */\nvar COPY_ = 15; /* i/o: same as COPY below, but only first time in */\nvar COPY = 16; /* i/o: waiting for input or output to copy stored block */\nvar TABLE = 17; /* i: waiting for dynamic block table lengths */\nvar LENLENS = 18; /* i: waiting for code length code lengths */\nvar CODELENS = 19; /* i: waiting for length/lit and distance code lengths */\nvar LEN_ = 20; /* i: same as LEN below, but only first time in */\nvar LEN = 21; /* i: waiting for length/lit/eob code */\nvar LENEXT = 22; /* i: waiting for length extra bits */\nvar DIST = 23; /* i: waiting for distance code */\nvar DISTEXT = 24; /* i: waiting for distance extra bits */\nvar MATCH = 25; /* o: waiting for output space to copy string */\nvar LIT = 26; /* o: waiting for output space to write literal */\nvar CHECK = 27; /* i: waiting for 32-bit check value */\nvar LENGTH = 28; /* i: waiting for 32-bit length (gzip) */\nvar DONE = 29; /* finished check, done -- remain here until reset */\nvar BAD = 30; /* got a data error -- remain here until reset */\nvar MEM = 31; /* got an inflate() memory error -- remain here until reset */\nvar SYNC = 32; /* looking for synchronization bytes to restart inflate() */\n\n/* ===========================================================================*/\n\n\n\nvar ENOUGH_LENS = 852;\nvar ENOUGH_DISTS = 592;\n//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nvar MAX_WBITS = 15;\n/* 32K LZ77 window */\nvar DEF_WBITS = MAX_WBITS;\n\n\nfunction zswap32(q) {\n return (((q >>> 24) & 0xff) +\n ((q >>> 8) & 0xff00) +\n ((q & 0xff00) << 8) +\n ((q & 0xff) << 24));\n}\n\n\nfunction InflateState() {\n this.mode = 0; /* current inflate mode */\n this.last = false; /* true if processing last block */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.havedict = false; /* true if dictionary provided */\n this.flags = 0; /* gzip header method and flags (0 if zlib) */\n this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */\n this.check = 0; /* protected copy of check value */\n this.total = 0; /* protected copy of output count */\n // TODO: may be {}\n this.head = null; /* where to save gzip header 
information */\n\n /* sliding window */\n this.wbits = 0; /* log base 2 of requested window size */\n this.wsize = 0; /* window size or zero if not using window */\n this.whave = 0; /* valid bytes in the window */\n this.wnext = 0; /* window write index */\n this.window = null; /* allocated sliding window, if needed */\n\n /* bit accumulator */\n this.hold = 0; /* input bit accumulator */\n this.bits = 0; /* number of bits in \"in\" */\n\n /* for string and stored block copying */\n this.length = 0; /* literal or length of data to copy */\n this.offset = 0; /* distance back to copy string from */\n\n /* for table and code decoding */\n this.extra = 0; /* extra bits needed */\n\n /* fixed and dynamic code tables */\n this.lencode = null; /* starting table for length/literal codes */\n this.distcode = null; /* starting table for distance codes */\n this.lenbits = 0; /* index bits for lencode */\n this.distbits = 0; /* index bits for distcode */\n\n /* dynamic table building */\n this.ncode = 0; /* number of code length code lengths */\n this.nlen = 0; /* number of length code lengths */\n this.ndist = 0; /* number of distance code lengths */\n this.have = 0; /* number of code lengths in lens[] */\n this.next = null; /* next available space in codes[] */\n\n this.lens = new utils.Buf16(320); /* temporary storage for code lengths */\n this.work = new utils.Buf16(288); /* work area for code table building */\n\n /*\n because we don't have pointers in js, we use lencode and distcode directly\n as buffers so we don't need codes\n */\n //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */\n this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */\n this.distdyn = null; /* dynamic table for distance codes (JS specific) */\n this.sane = 0; /* if false, allow invalid distance too far */\n this.back = 0; /* bits back of last unprocessed length/lit */\n this.was = 0; /* initial length of match */\n}\n\nfunction inflateResetKeep(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n strm.total_in = strm.total_out = state.total = 0;\n strm.msg = ''; /*Z_NULL*/\n if (state.wrap) { /* to support ill-conceived Java test suite */\n strm.adler = state.wrap & 1;\n }\n state.mode = HEAD;\n state.last = 0;\n state.havedict = 0;\n state.dmax = 32768;\n state.head = null/*Z_NULL*/;\n state.hold = 0;\n state.bits = 0;\n //state.lencode = state.distcode = state.next = state.codes;\n state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS);\n state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS);\n\n state.sane = 1;\n state.back = -1;\n //Tracev((stderr, \"inflate: reset\\n\"));\n return Z_OK;\n}\n\nfunction inflateReset(strm) {\n var state;\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n state.wsize = 0;\n state.whave = 0;\n state.wnext = 0;\n return inflateResetKeep(strm);\n\n}\n\nfunction inflateReset2(strm, windowBits) {\n var wrap;\n var state;\n\n /* get the state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n /* extract wrap request from windowBits parameter */\n if (windowBits < 0) {\n wrap = 0;\n windowBits = -windowBits;\n }\n else {\n wrap = (windowBits >> 4) + 1;\n if (windowBits < 48) {\n windowBits &= 15;\n }\n }\n\n /* set number of window bits, free window if different */\n if (windowBits && (windowBits < 8 || windowBits > 15)) {\n return Z_STREAM_ERROR;\n }\n if (state.window !== null && state.wbits !== windowBits) {\n state.window = 
null;\n }\n\n /* update state and reset the rest of it */\n state.wrap = wrap;\n state.wbits = windowBits;\n return inflateReset(strm);\n}\n\nfunction inflateInit2(strm, windowBits) {\n var ret;\n var state;\n\n if (!strm) { return Z_STREAM_ERROR; }\n //strm.msg = Z_NULL; /* in case we return an error */\n\n state = new InflateState();\n\n //if (state === Z_NULL) return Z_MEM_ERROR;\n //Tracev((stderr, \"inflate: allocated\\n\"));\n strm.state = state;\n state.window = null/*Z_NULL*/;\n ret = inflateReset2(strm, windowBits);\n if (ret !== Z_OK) {\n strm.state = null/*Z_NULL*/;\n }\n return ret;\n}\n\nfunction inflateInit(strm) {\n return inflateInit2(strm, DEF_WBITS);\n}\n\n\n/*\n Return state with length and distance decoding tables and index sizes set to\n fixed code decoding. Normally this returns fixed tables from inffixed.h.\n If BUILDFIXED is defined, then instead this routine builds the tables the\n first time it's called, and returns those tables the first time and\n thereafter. This reduces the size of the code by about 2K bytes, in\n exchange for a little execution time. However, BUILDFIXED should not be\n used for threaded applications, since the rewriting of the tables and virgin\n may not be thread-safe.\n */\nvar virgin = true;\n\nvar lenfix, distfix; // We have no pointers in JS, so keep tables separate\n\nfunction fixedtables(state) {\n /* build fixed huffman tables if first call (may not be thread safe) */\n if (virgin) {\n var sym;\n\n lenfix = new utils.Buf32(512);\n distfix = new utils.Buf32(32);\n\n /* literal/length table */\n sym = 0;\n while (sym < 144) { state.lens[sym++] = 8; }\n while (sym < 256) { state.lens[sym++] = 9; }\n while (sym < 280) { state.lens[sym++] = 7; }\n while (sym < 288) { state.lens[sym++] = 8; }\n\n inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });\n\n /* distance table */\n sym = 0;\n while (sym < 32) { state.lens[sym++] = 5; }\n\n inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });\n\n /* do this just once */\n virgin = false;\n }\n\n state.lencode = lenfix;\n state.lenbits = 9;\n state.distcode = distfix;\n state.distbits = 5;\n}\n\n\n/*\n Update the window with the last wsize (normally 32K) bytes written before\n returning. If window does not exist yet, create it. 
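// Aside (not from the original source): conceptually the window is a circular
// buffer holding the most recent wsize bytes of output. A simplified per-byte
// illustration; updatewindow() below achieves the same effect with at most two
// bulk copies instead of a loop.
function pushHistoryByte(win, state, byte) {
  win[state.wnext] = byte;                          // overwrite the oldest slot
  state.wnext = (state.wnext + 1) % win.length;     // advance write index, wrapping
  if (state.whave < win.length) { state.whave++; }  // grow "valid bytes" up to wsize
}
// (the original comment resumes below)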
This is only called\n when a window is already in use, or when output has been written during this\n inflate call, but the end of the deflate stream has not been reached yet.\n It is also called to create a window for dictionary data when a dictionary\n is loaded.\n\n Providing output buffers larger than 32K to inflate() should provide a speed\n advantage, since only the last 32K of output is copied to the sliding window\n upon return from inflate(), and since all distances after the first 32K of\n output will fall in the output data, making match copies simpler and faster.\n The advantage may be dependent on the size of the processor's data caches.\n */\nfunction updatewindow(strm, src, end, copy) {\n var dist;\n var state = strm.state;\n\n /* if it hasn't been done already, allocate space for the window */\n if (state.window === null) {\n state.wsize = 1 << state.wbits;\n state.wnext = 0;\n state.whave = 0;\n\n state.window = new utils.Buf8(state.wsize);\n }\n\n /* copy state->wsize or less output bytes into the circular window */\n if (copy >= state.wsize) {\n utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0);\n state.wnext = 0;\n state.whave = state.wsize;\n }\n else {\n dist = state.wsize - state.wnext;\n if (dist > copy) {\n dist = copy;\n }\n //zmemcpy(state->window + state->wnext, end - copy, dist);\n utils.arraySet(state.window, src, end - copy, dist, state.wnext);\n copy -= dist;\n if (copy) {\n //zmemcpy(state->window, end - copy, copy);\n utils.arraySet(state.window, src, end - copy, copy, 0);\n state.wnext = copy;\n state.whave = state.wsize;\n }\n else {\n state.wnext += dist;\n if (state.wnext === state.wsize) { state.wnext = 0; }\n if (state.whave < state.wsize) { state.whave += dist; }\n }\n }\n return 0;\n}\n\nfunction inflate(strm, flush) {\n var state;\n var input, output; // input/output buffers\n var next; /* next input INDEX */\n var put; /* next output INDEX */\n var have, left; /* available input and output */\n var hold; /* bit buffer */\n var bits; /* bits in bit buffer */\n var _in, _out; /* save starting available input and output */\n var copy; /* number of stored or match bytes to copy */\n var from; /* where to copy match bytes from */\n var from_source;\n var here = 0; /* current decoding table entry */\n var here_bits, here_op, here_val; // paked \"here\" denormalized (JS specific)\n //var last; /* parent table entry */\n var last_bits, last_op, last_val; // paked \"last\" denormalized (JS specific)\n var len; /* length to copy for repeats, bits to drop */\n var ret; /* return code */\n var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */\n var opts;\n\n var n; // temporary var for NEED_BITS\n\n var order = /* permutation of code lengths */\n [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];\n\n\n if (!strm || !strm.state || !strm.output ||\n (!strm.input && strm.avail_in !== 0)) {\n return Z_STREAM_ERROR;\n }\n\n state = strm.state;\n if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */\n\n\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n _in = have;\n _out = left;\n ret = Z_OK;\n\n inf_leave: // goto emulation\n for (;;) {\n switch (state.mode) {\n case HEAD:\n if (state.wrap === 0) {\n state.mode = TYPEDO;\n break;\n }\n //=== NEEDBITS(16);\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold 
+= input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */\n state.check = 0/*crc32(0L, Z_NULL, 0)*/;\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = FLAGS;\n break;\n }\n state.flags = 0; /* expect zlib header */\n if (state.head) {\n state.head.done = false;\n }\n if (!(state.wrap & 1) || /* check if zlib header allowed */\n (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) {\n strm.msg = 'incorrect header check';\n state.mode = BAD;\n break;\n }\n if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n len = (hold & 0x0f)/*BITS(4)*/ + 8;\n if (state.wbits === 0) {\n state.wbits = len;\n }\n else if (len > state.wbits) {\n strm.msg = 'invalid window size';\n state.mode = BAD;\n break;\n }\n state.dmax = 1 << len;\n //Tracev((stderr, \"inflate: zlib header ok\\n\"));\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = hold & 0x200 ? DICTID : TYPE;\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n break;\n case FLAGS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.flags = hold;\n if ((state.flags & 0xff) !== Z_DEFLATED) {\n strm.msg = 'unknown compression method';\n state.mode = BAD;\n break;\n }\n if (state.flags & 0xe000) {\n strm.msg = 'unknown header flags set';\n state.mode = BAD;\n break;\n }\n if (state.head) {\n state.head.text = ((hold >> 8) & 1);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = TIME;\n /* falls through */\n case TIME:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.time = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC4(state.check, hold)\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n hbuf[2] = (hold >>> 16) & 0xff;\n hbuf[3] = (hold >>> 24) & 0xff;\n state.check = crc32(state.check, hbuf, 4, 0);\n //===\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = OS;\n /* falls through */\n case OS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.xflags = (hold & 0xff);\n state.head.os = (hold >> 8);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = EXLEN;\n /* falls through */\n case EXLEN:\n if (state.flags & 0x0400) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length = hold;\n if (state.head) {\n state.head.extra_len = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = 
(hold >>> 8) & 0xff;\n state.check = crc32(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n else if (state.head) {\n state.head.extra = null/*Z_NULL*/;\n }\n state.mode = EXTRA;\n /* falls through */\n case EXTRA:\n if (state.flags & 0x0400) {\n copy = state.length;\n if (copy > have) { copy = have; }\n if (copy) {\n if (state.head) {\n len = state.head.extra_len - state.length;\n if (!state.head.extra) {\n // Use untyped array for more convenient processing later\n state.head.extra = new Array(state.head.extra_len);\n }\n utils.arraySet(\n state.head.extra,\n input,\n next,\n // extra field is limited to 65536 bytes\n // - no need for additional size check\n copy,\n /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/\n len\n );\n //zmemcpy(state.head.extra + len, next,\n // len + copy > state.head.extra_max ?\n // state.head.extra_max - len : copy);\n }\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n state.length -= copy;\n }\n if (state.length) { break inf_leave; }\n }\n state.length = 0;\n state.mode = NAME;\n /* falls through */\n case NAME:\n if (state.flags & 0x0800) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n // TODO: 2 or 1 bytes?\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.name_max*/)) {\n state.head.name += String.fromCharCode(len);\n }\n } while (len && copy < have);\n\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.name = null;\n }\n state.length = 0;\n state.mode = COMMENT;\n /* falls through */\n case COMMENT:\n if (state.flags & 0x1000) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.comm_max*/)) {\n state.head.comment += String.fromCharCode(len);\n }\n } while (len && copy < have);\n if (state.flags & 0x0200) {\n state.check = crc32(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.comment = null;\n }\n state.mode = HCRC;\n /* falls through */\n case HCRC:\n if (state.flags & 0x0200) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.check & 0xffff)) {\n strm.msg = 'header crc mismatch';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n if (state.head) {\n state.head.hcrc = ((state.flags >> 9) & 1);\n state.head.done = true;\n }\n strm.adler = state.check = 0;\n state.mode = TYPE;\n break;\n case DICTID:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n strm.adler = state.check = zswap32(hold);\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = DICT;\n /* falls through */\n case DICT:\n if (state.havedict === 0) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n 
return Z_NEED_DICT;\n }\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = TYPE;\n /* falls through */\n case TYPE:\n if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case TYPEDO:\n if (state.last) {\n //--- BYTEBITS() ---//\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n state.mode = CHECK;\n break;\n }\n //=== NEEDBITS(3); */\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.last = (hold & 0x01)/*BITS(1)*/;\n //--- DROPBITS(1) ---//\n hold >>>= 1;\n bits -= 1;\n //---//\n\n switch ((hold & 0x03)/*BITS(2)*/) {\n case 0: /* stored block */\n //Tracev((stderr, \"inflate: stored block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = STORED;\n break;\n case 1: /* fixed block */\n fixedtables(state);\n //Tracev((stderr, \"inflate: fixed codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = LEN_; /* decode codes */\n if (flush === Z_TREES) {\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break inf_leave;\n }\n break;\n case 2: /* dynamic block */\n //Tracev((stderr, \"inflate: dynamic codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = TABLE;\n break;\n case 3:\n strm.msg = 'invalid block type';\n state.mode = BAD;\n }\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break;\n case STORED:\n //--- BYTEBITS() ---// /* go to byte boundary */\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) {\n strm.msg = 'invalid stored block lengths';\n state.mode = BAD;\n break;\n }\n state.length = hold & 0xffff;\n //Tracev((stderr, \"inflate: stored length %u\\n\",\n // state.length));\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = COPY_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case COPY_:\n state.mode = COPY;\n /* falls through */\n case COPY:\n copy = state.length;\n if (copy) {\n if (copy > have) { copy = have; }\n if (copy > left) { copy = left; }\n if (copy === 0) { break inf_leave; }\n //--- zmemcpy(put, next, copy); ---\n utils.arraySet(output, input, next, copy, put);\n //---//\n have -= copy;\n next += copy;\n left -= copy;\n put += copy;\n state.length -= copy;\n break;\n }\n //Tracev((stderr, \"inflate: stored end\\n\"));\n state.mode = TYPE;\n break;\n case TABLE:\n //=== NEEDBITS(14); */\n while (bits < 14) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4;\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n//#ifndef PKZIP_BUG_WORKAROUND\n if (state.nlen > 286 || state.ndist > 30) {\n strm.msg = 'too many length or distance symbols';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracev((stderr, \"inflate: table sizes ok\\n\"));\n state.have = 0;\n state.mode = LENLENS;\n /* falls through */\n case LENLENS:\n while (state.have < state.ncode) {\n //=== NEEDBITS(3);\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << 
bits;\n bits += 8;\n }\n //===//\n state.lens[order[state.have++]] = (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n while (state.have < 19) {\n state.lens[order[state.have++]] = 0;\n }\n // We have separate tables & no pointers. 2 commented lines below not needed.\n //state.next = state.codes;\n //state.lencode = state.next;\n // Switch to use dynamic table\n state.lencode = state.lendyn;\n state.lenbits = 7;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts);\n state.lenbits = opts.bits;\n\n if (ret) {\n strm.msg = 'invalid code lengths set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, \"inflate: code lengths ok\\n\"));\n state.have = 0;\n state.mode = CODELENS;\n /* falls through */\n case CODELENS:\n while (state.have < state.nlen + state.ndist) {\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_val < 16) {\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.lens[state.have++] = here_val;\n }\n else {\n if (here_val === 16) {\n //=== NEEDBITS(here.bits + 2);\n n = here_bits + 2;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n if (state.have === 0) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n len = state.lens[state.have - 1];\n copy = 3 + (hold & 0x03);//BITS(2);\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n }\n else if (here_val === 17) {\n //=== NEEDBITS(here.bits + 3);\n n = here_bits + 3;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 3 + (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n else {\n //=== NEEDBITS(here.bits + 7);\n n = here_bits + 7;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 11 + (hold & 0x7f);//BITS(7);\n //--- DROPBITS(7) ---//\n hold >>>= 7;\n bits -= 7;\n //---//\n }\n if (state.have + copy > state.nlen + state.ndist) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD;\n break;\n }\n while (copy--) {\n state.lens[state.have++] = len;\n }\n }\n }\n\n /* handle error breaks in while */\n if (state.mode === BAD) { break; }\n\n /* check for end-of-block code (better have one) */\n if (state.lens[256] === 0) {\n strm.msg = 'invalid code -- missing end-of-block';\n state.mode = BAD;\n break;\n }\n\n /* build code tables -- note: do not change the lenbits or distbits\n values here (9 and 6) without reading the comments in inftrees.h\n concerning the ENOUGH constants, which depend on those values */\n state.lenbits = 9;\n\n opts = { bits: state.lenbits };\n ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, 
state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.lenbits = opts.bits;\n // state.lencode = state.next;\n\n if (ret) {\n strm.msg = 'invalid literal/lengths set';\n state.mode = BAD;\n break;\n }\n\n state.distbits = 6;\n //state.distcode.copy(state.codes);\n // Switch to use dynamic table\n state.distcode = state.distdyn;\n opts = { bits: state.distbits };\n ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.distbits = opts.bits;\n // state.distcode = state.next;\n\n if (ret) {\n strm.msg = 'invalid distances set';\n state.mode = BAD;\n break;\n }\n //Tracev((stderr, 'inflate: codes ok\\n'));\n state.mode = LEN_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case LEN_:\n state.mode = LEN;\n /* falls through */\n case LEN:\n if (have >= 6 && left >= 258) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n inflate_fast(strm, _out);\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n if (state.mode === TYPE) {\n state.back = -1;\n }\n break;\n }\n state.back = 0;\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if (here_bits <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_op && (here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.lencode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n state.length = here_val;\n if (here_op === 0) {\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n state.mode = LIT;\n break;\n }\n if (here_op & 32) {\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.back = -1;\n state.mode = TYPE;\n break;\n }\n if (here_op & 64) {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break;\n }\n state.extra = here_op & 15;\n state.mode = LENEXT;\n /* falls through */\n case LENEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= 
state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", state.length));\n state.was = state.length;\n state.mode = DIST;\n /* falls through */\n case DIST:\n for (;;) {\n here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if ((here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.distcode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n if (here_op & 64) {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break;\n }\n state.offset = here_val;\n state.extra = (here_op) & 15;\n state.mode = DISTEXT;\n /* falls through */\n case DISTEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n//#ifdef INFLATE_STRICT\n if (state.offset > state.dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n//#endif\n //Tracevv((stderr, \"inflate: distance %u\\n\", state.offset));\n state.mode = MATCH;\n /* falls through */\n case MATCH:\n if (left === 0) { break inf_leave; }\n copy = _out - left;\n if (state.offset > copy) { /* copy from window */\n copy = state.offset - copy;\n if (copy > state.whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break;\n }\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// Trace((stderr, \"inflate.c too far\\n\"));\n// copy -= state.whave;\n// if (copy > state.length) { copy = state.length; }\n// if (copy > left) { copy = left; }\n// left -= copy;\n// state.length -= copy;\n// do {\n// output[put++] = 0;\n// } while (--copy);\n// if (state.length === 0) { state.mode = LEN; }\n// break;\n//#endif\n }\n if (copy > state.wnext) {\n copy -= state.wnext;\n from = state.wsize - copy;\n }\n else {\n from = state.wnext - copy;\n }\n if (copy > state.length) { copy = state.length; }\n from_source = state.window;\n }\n else { /* copy from output */\n from_source = output;\n from = put - state.offset;\n copy = state.length;\n }\n if (copy > left) { copy = left; }\n left -= copy;\n state.length -= copy;\n do {\n output[put++] = from_source[from++];\n } while (--copy);\n if (state.length === 0) { state.mode = LEN; }\n break;\n case LIT:\n if (left === 0) { break inf_leave; }\n output[put++] = state.length;\n left--;\n state.mode = LEN;\n break;\n case CHECK:\n if (state.wrap) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n // Use '|' instead of '+' to make sure that result is signed\n hold |= input[next++] << bits;\n bits += 8;\n }\n //===//\n _out -= left;\n strm.total_out += _out;\n state.total += _out;\n if (_out) {\n strm.adler = state.check =\n /*UPDATE(state.check, put - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out));\n\n }\n _out = left;\n // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too\n if ((state.flags ? hold : zswap32(hold)) !== state.check) {\n strm.msg = 'incorrect data check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: check matches trailer\\n\"));\n }\n state.mode = LENGTH;\n /* falls through */\n case LENGTH:\n if (state.wrap && state.flags) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.total & 0xffffffff)) {\n strm.msg = 'incorrect length check';\n state.mode = BAD;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: length matches trailer\\n\"));\n }\n state.mode = DONE;\n /* falls through */\n case DONE:\n ret = Z_STREAM_END;\n break inf_leave;\n case BAD:\n ret = Z_DATA_ERROR;\n break inf_leave;\n case MEM:\n return Z_MEM_ERROR;\n case SYNC:\n /* falls through */\n default:\n return Z_STREAM_ERROR;\n }\n }\n\n // inf_leave <- here is real place for \"goto inf_leave\", emulated via \"break inf_leave\"\n\n /*\n Return from inflate(), updating the total counts and the check value.\n If there was no progress during the inflate() call, return a buffer\n error. 
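// Aside (not from the original source): the "no progress" rule mentioned above,
// isolated as a tiny function. It mirrors the check performed near the end of
// this routine; parameter names are hypothetical.
function bufErrorIfStalled(consumedIn, producedOut, flush, ret) {
  var Z_OK = 0, Z_FINISH = 4, Z_BUF_ERROR = -5;
  if (((consumedIn === 0 && producedOut === 0) || flush === Z_FINISH) && ret === Z_OK) {
    return Z_BUF_ERROR;   // caller must supply more input and/or more output space
  }
  return ret;
}
// (the original comment resumes below)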
Call updatewindow() to create and/or update the window state.\n Note: a memory error from inflate() is non-recoverable.\n */\n\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n\n if (state.wsize || (_out !== strm.avail_out && state.mode < BAD &&\n (state.mode < CHECK || flush !== Z_FINISH))) {\n if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n }\n _in -= strm.avail_in;\n _out -= strm.avail_out;\n strm.total_in += _in;\n strm.total_out += _out;\n state.total += _out;\n if (state.wrap && _out) {\n strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/\n (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out));\n }\n strm.data_type = state.bits + (state.last ? 64 : 0) +\n (state.mode === TYPE ? 128 : 0) +\n (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0);\n if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) {\n ret = Z_BUF_ERROR;\n }\n return ret;\n}\n\nfunction inflateEnd(strm) {\n\n if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) {\n return Z_STREAM_ERROR;\n }\n\n var state = strm.state;\n if (state.window) {\n state.window = null;\n }\n strm.state = null;\n return Z_OK;\n}\n\nfunction inflateGetHeader(strm, head) {\n var state;\n\n /* check state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n state = strm.state;\n if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; }\n\n /* save header structure */\n state.head = head;\n head.done = false;\n return Z_OK;\n}\n\nfunction inflateSetDictionary(strm, dictionary) {\n var dictLength = dictionary.length;\n\n var state;\n var dictid;\n var ret;\n\n /* check state */\n if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; }\n state = strm.state;\n\n if (state.wrap !== 0 && state.mode !== DICT) {\n return Z_STREAM_ERROR;\n }\n\n /* check for correct dictionary identifier */\n if (state.mode === DICT) {\n dictid = 1; /* adler32(0, null, 0)*/\n /* dictid = adler32(dictid, dictionary, dictLength); */\n dictid = adler32(dictid, dictionary, dictLength, 0);\n if (dictid !== state.check) {\n return Z_DATA_ERROR;\n }\n }\n /* copy dictionary to window using updatewindow(), which will amend the\n existing dictionary if appropriate */\n ret = updatewindow(strm, dictionary, dictLength, dictLength);\n if (ret) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n state.havedict = 1;\n // Tracev((stderr, \"inflate: dictionary set\\n\"));\n return Z_OK;\n}\n\nexports.inflateReset = inflateReset;\nexports.inflateReset2 = inflateReset2;\nexports.inflateResetKeep = inflateResetKeep;\nexports.inflateInit = inflateInit;\nexports.inflateInit2 = inflateInit2;\nexports.inflate = inflate;\nexports.inflateEnd = inflateEnd;\nexports.inflateGetHeader = inflateGetHeader;\nexports.inflateSetDictionary = inflateSetDictionary;\nexports.inflateInfo = 'pako inflate (from Nodeca project)';\n\n/* Not implemented\nexports.inflateCopy = inflateCopy;\nexports.inflateGetDictionary = inflateGetDictionary;\nexports.inflateMark = inflateMark;\nexports.inflatePrime = inflatePrime;\nexports.inflateSync = inflateSync;\nexports.inflateSyncPoint = inflateSyncPoint;\nexports.inflateUndermine = inflateUndermine;\n*/\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey 
Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nmodule.exports = {\n\n /* Allowed flush values; see deflate() and inflate() below for details */\n Z_NO_FLUSH: 0,\n Z_PARTIAL_FLUSH: 1,\n Z_SYNC_FLUSH: 2,\n Z_FULL_FLUSH: 3,\n Z_FINISH: 4,\n Z_BLOCK: 5,\n Z_TREES: 6,\n\n /* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\n Z_OK: 0,\n Z_STREAM_END: 1,\n Z_NEED_DICT: 2,\n Z_ERRNO: -1,\n Z_STREAM_ERROR: -2,\n Z_DATA_ERROR: -3,\n //Z_MEM_ERROR: -4,\n Z_BUF_ERROR: -5,\n //Z_VERSION_ERROR: -6,\n\n /* compression levels */\n Z_NO_COMPRESSION: 0,\n Z_BEST_SPEED: 1,\n Z_BEST_COMPRESSION: 9,\n Z_DEFAULT_COMPRESSION: -1,\n\n\n Z_FILTERED: 1,\n Z_HUFFMAN_ONLY: 2,\n Z_RLE: 3,\n Z_FIXED: 4,\n Z_DEFAULT_STRATEGY: 0,\n\n /* Possible values of the data_type field (though see inflate()) */\n Z_BINARY: 0,\n Z_TEXT: 1,\n //Z_ASCII: 1, // = Z_TEXT (deprecated)\n Z_UNKNOWN: 2,\n\n /* The deflate compression method */\n Z_DEFLATED: 8\n //Z_NULL: null // Use -1 or null inline, depending on var type\n};\n","'use strict';\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\nfunction GZheader() {\n /* true if compressed data believed to be text */\n this.text = 0;\n /* modification time */\n this.time = 0;\n /* extra flags (not used when writing a gzip file) */\n this.xflags = 0;\n /* operating system */\n this.os = 0;\n /* pointer to extra field or Z_NULL if none */\n this.extra = null;\n /* extra field length (valid if extra != Z_NULL) */\n this.extra_len = 0; // Actually, we don't need it in JS,\n // but leave for few code modifications\n\n //\n // Setup limits is not necessary because in js we should not preallocate memory\n // for inflate use constant limit in 65536 bytes\n //\n\n /* space at extra (only when reading header) */\n // this.extra_max = 0;\n /* pointer to zero-terminated file name or Z_NULL */\n this.name = '';\n /* space at name (only when reading header) */\n // this.name_max = 0;\n /* pointer to zero-terminated comment or Z_NULL */\n this.comment = '';\n /* space at comment (only when reading header) */\n // this.comm_max = 0;\n /* true if there was or will be a header crc */\n this.hcrc = 0;\n /* true when done reading gzip header (not used when writing a gzip file) */\n this.done = false;\n}\n\nmodule.exports = GZheader;\n","'use strict';\n\n\nvar zlib_inflate = require('./zlib/inflate');\nvar utils = require('./utils/common');\nvar strings = require('./utils/strings');\nvar c = require('./zlib/constants');\nvar msg = require('./zlib/messages');\nvar ZStream = require('./zlib/zstream');\nvar GZheader = require('./zlib/gzheader');\n\nvar toString = Object.prototype.toString;\n\n/**\n * class Inflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[inflate]]\n * and [[inflateRaw]].\n **/\n\n/* internal\n * inflate.chunks -> Array\n *\n * Chunks of output data, if [[Inflate#onData]] not overridden.\n **/\n\n/**\n * Inflate.result -> Uint8Array|Array|String\n *\n * Uncompressed result, generated by default [[Inflate#onData]]\n * and [[Inflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you\n * push a chunk with explicit flush (call [[Inflate#push]] with\n * `Z_SYNC_FLUSH` param).\n **/\n\n/**\n * Inflate.err -> Number\n *\n * Error code after inflate finished. 0 (Z_OK) on success.\n * Should be checked if broken data possible.\n **/\n\n/**\n * Inflate.msg -> String\n *\n * Error message, if [[Inflate.err]] != 0\n **/\n\n\n/**\n * new Inflate(options)\n * - options (Object): zlib inflate options.\n *\n * Creates new inflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `windowBits`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw inflate\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n * By default, when no options set, autodetect deflate/gzip data format via\n * wrapper header.\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * var inflate = new pako.Inflate({ level: 3});\n *\n * inflate.push(chunk1, false);\n * inflate.push(chunk2, true); // true -> last chunk\n *\n * if (inflate.err) { throw new Error(inflate.err); }\n *\n * console.log(inflate.result);\n * ```\n **/\nfunction Inflate(options) {\n if (!(this instanceof Inflate)) return new Inflate(options);\n\n this.options = utils.assign({\n chunkSize: 16384,\n windowBits: 0,\n to: ''\n }, options || {});\n\n var opt = this.options;\n\n // Force window size for `raw` data, if not set directly,\n // because we have no header for autodetect.\n if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {\n opt.windowBits = -opt.windowBits;\n if (opt.windowBits === 0) { opt.windowBits = -15; }\n }\n\n // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate\n if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&\n !(options && options.windowBits)) {\n opt.windowBits += 32;\n }\n\n // Gzip header has no info about windows size, we can do autodetect only\n // for deflate. So, if window size not set, force it to max when gzip possible\n if ((opt.windowBits > 15) && (opt.windowBits < 48)) {\n // bit 3 (16) -> gzipped data\n // bit 4 (32) -> autodetect gzip/deflate\n if ((opt.windowBits & 15) === 0) {\n opt.windowBits |= 15;\n }\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new ZStream();\n this.strm.avail_out = 0;\n\n var status = zlib_inflate.inflateInit2(\n this.strm,\n opt.windowBits\n );\n\n if (status !== c.Z_OK) {\n throw new Error(msg[status]);\n }\n\n this.header = new GZheader();\n\n zlib_inflate.inflateGetHeader(this.strm, this.header);\n\n // Setup dictionary\n if (opt.dictionary) {\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n opt.dictionary = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n opt.dictionary = new Uint8Array(opt.dictionary);\n }\n if (opt.raw) { //In raw mode we need to set the dictionary early\n status = zlib_inflate.inflateSetDictionary(this.strm, opt.dictionary);\n if (status !== c.Z_OK) {\n throw new Error(msg[status]);\n }\n }\n }\n}\n\n/**\n * Inflate#push(data[, mode]) -> Boolean\n * - data (Uint8Array|Array|ArrayBuffer|String): input data\n * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.\n *\n * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with\n * new output chunks. Returns `true` on success. The last data block must have\n * mode Z_FINISH (or `true`). That will flush internal pending buffers and call\n * [[Inflate#onEnd]]. 
For interim explicit flushes (without ending the stream) you\n * can use mode Z_SYNC_FLUSH, keeping the decompression context.\n *\n * On fail call [[Inflate#onEnd]] with error code and return false.\n *\n * We strongly recommend to use `Uint8Array` on input for best speed (output\n * format is detected automatically). Also, don't skip last param and always\n * use the same type in your code (boolean or number). That will improve JS speed.\n *\n * For regular `Array`-s make sure all elements are [0..255].\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nInflate.prototype.push = function (data, mode) {\n var strm = this.strm;\n var chunkSize = this.options.chunkSize;\n var dictionary = this.options.dictionary;\n var status, _mode;\n var next_out_utf8, tail, utf8str;\n\n // Flag to properly process Z_BUF_ERROR on testing inflate call\n // when we check that all output data was flushed.\n var allowBufError = false;\n\n if (this.ended) { return false; }\n _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);\n\n // Convert data if needed\n if (typeof data === 'string') {\n // Only binary strings can be decompressed on practice\n strm.input = strings.binstring2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n do {\n if (strm.avail_out === 0) {\n strm.output = new utils.Buf8(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n\n status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */\n\n if (status === c.Z_NEED_DICT && dictionary) {\n status = zlib_inflate.inflateSetDictionary(this.strm, dictionary);\n }\n\n if (status === c.Z_BUF_ERROR && allowBufError === true) {\n status = c.Z_OK;\n allowBufError = false;\n }\n\n if (status !== c.Z_STREAM_END && status !== c.Z_OK) {\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n\n if (strm.next_out) {\n if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {\n\n if (this.options.to === 'string') {\n\n next_out_utf8 = strings.utf8border(strm.output, strm.next_out);\n\n tail = strm.next_out - next_out_utf8;\n utf8str = strings.buf2string(strm.output, next_out_utf8);\n\n // move tail\n strm.next_out = tail;\n strm.avail_out = chunkSize - tail;\n if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }\n\n this.onData(utf8str);\n\n } else {\n this.onData(utils.shrinkBuf(strm.output, strm.next_out));\n }\n }\n }\n\n // When no more input data, we should check that internal inflate buffers\n // are flushed. The only way to do it when avail_out = 0 - run one more\n // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.\n // Here we set flag to process this error properly.\n //\n // NOTE. 
Deflate does not return error in this case and does not needs such\n // logic.\n if (strm.avail_in === 0 && strm.avail_out === 0) {\n allowBufError = true;\n }\n\n } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);\n\n if (status === c.Z_STREAM_END) {\n _mode = c.Z_FINISH;\n }\n\n // Finalize on the last chunk.\n if (_mode === c.Z_FINISH) {\n status = zlib_inflate.inflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === c.Z_OK;\n }\n\n // callback interim results if Z_SYNC_FLUSH.\n if (_mode === c.Z_SYNC_FLUSH) {\n this.onEnd(c.Z_OK);\n strm.avail_out = 0;\n return true;\n }\n\n return true;\n};\n\n\n/**\n * Inflate#onData(chunk) -> Void\n * - chunk (Uint8Array|Array|String): output data. Type of array depends\n * on js engine support. When string output requested, each chunk\n * will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nInflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Inflate#onEnd(status) -> Void\n * - status (Number): inflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called either after you tell inflate that the input stream is\n * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)\n * or if an error happened. By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nInflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === c.Z_OK) {\n if (this.options.to === 'string') {\n // Glue & convert here, until we teach pako to send\n // utf8 aligned strings to onData\n this.result = this.chunks.join('');\n } else {\n this.result = utils.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * inflate(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Decompress `data` with inflate/ungzip and `options`. Autodetect\n * format via wrapper header by default. That's why we don't provide\n * separate `ungzip` method.\n *\n * Supported options are:\n *\n * - windowBits\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. 
When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n *\n * ##### Example:\n *\n * ```javascript\n * var pako = require('pako')\n * , input = pako.deflate([1,2,3,4,5,6,7,8,9])\n * , output;\n *\n * try {\n * output = pako.inflate(input);\n * } catch (err)\n * console.log(err);\n * }\n * ```\n **/\nfunction inflate(input, options) {\n var inflator = new Inflate(options);\n\n inflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (inflator.err) { throw inflator.msg || msg[inflator.err]; }\n\n return inflator.result;\n}\n\n\n/**\n * inflateRaw(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * The same as [[inflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction inflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return inflate(input, options);\n}\n\n\n/**\n * ungzip(data[, options]) -> Uint8Array|Array|String\n * - data (Uint8Array|Array|String): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Just shortcut to [[inflate]], because it autodetects format\n * by header.content. Done for convenience.\n **/\n\n\nexports.Inflate = Inflate;\nexports.inflate = inflate;\nexports.inflateRaw = inflateRaw;\nexports.ungzip = inflate;\n","// Top level file is just a mixin of submodules & constants\n'use strict';\n\nvar assign = require('./lib/utils/common').assign;\n\nvar deflate = require('./lib/deflate');\nvar inflate = require('./lib/inflate');\nvar constants = require('./lib/zlib/constants');\n\nvar pako = {};\n\nassign(pako, deflate, inflate, constants);\n\nmodule.exports = pako;\n","import CharCodes from \"../syntax/CharCodes\";\nimport { charFromCode, copyStringIntoBuffer } from \"../../utils\";\nvar PDFHeader = /** @class */ (function () {\n function PDFHeader(major, minor) {\n this.major = String(major);\n this.minor = String(minor);\n }\n PDFHeader.prototype.toString = function () {\n var bc = charFromCode(129);\n return \"%PDF-\" + this.major + \".\" + this.minor + \"\\n%\" + bc + bc + bc + bc;\n };\n PDFHeader.prototype.sizeInBytes = function () {\n return 12 + this.major.length + this.minor.length;\n };\n PDFHeader.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n buffer[offset++] = CharCodes.Percent;\n buffer[offset++] = CharCodes.P;\n buffer[offset++] = CharCodes.D;\n buffer[offset++] = CharCodes.F;\n buffer[offset++] = CharCodes.Dash;\n offset += copyStringIntoBuffer(this.major, buffer, offset);\n buffer[offset++] = CharCodes.Period;\n offset += copyStringIntoBuffer(this.minor, buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.Percent;\n buffer[offset++] = 129;\n buffer[offset++] = 129;\n buffer[offset++] = 129;\n buffer[offset++] = 129;\n return offset - initialOffset;\n };\n PDFHeader.forVersion = function (major, minor) {\n return new PDFHeader(major, minor);\n };\n return PDFHeader;\n}());\nexport default PDFHeader;\n//# sourceMappingURL=PDFHeader.js.map","import { MethodNotImplementedError } from \"../errors\";\nvar PDFObject = /** @class */ (function () {\n function PDFObject() {\n }\n PDFObject.prototype.clone = function (_context) {\n throw new MethodNotImplementedError(this.constructor.name, 'clone');\n };\n PDFObject.prototype.toString = function () {\n throw new 
MethodNotImplementedError(this.constructor.name, 'toString');\n };\n PDFObject.prototype.sizeInBytes = function () {\n throw new MethodNotImplementedError(this.constructor.name, 'sizeInBytes');\n };\n PDFObject.prototype.copyBytesInto = function (_buffer, _offset) {\n throw new MethodNotImplementedError(this.constructor.name, 'copyBytesInto');\n };\n return PDFObject;\n}());\nexport default PDFObject;\n//# sourceMappingURL=PDFObject.js.map","import { __extends } from \"tslib\";\nimport { copyStringIntoBuffer, numberToString } from \"../../utils/index\";\nimport PDFObject from \"./PDFObject\";\nvar PDFNumber = /** @class */ (function (_super) {\n __extends(PDFNumber, _super);\n function PDFNumber(value) {\n var _this = _super.call(this) || this;\n _this.numberValue = value;\n _this.stringValue = numberToString(value);\n return _this;\n }\n PDFNumber.prototype.asNumber = function () {\n return this.numberValue;\n };\n /** @deprecated in favor of [[PDFNumber.asNumber]] */\n PDFNumber.prototype.value = function () {\n return this.numberValue;\n };\n PDFNumber.prototype.clone = function () {\n return PDFNumber.of(this.numberValue);\n };\n PDFNumber.prototype.toString = function () {\n return this.stringValue;\n };\n PDFNumber.prototype.sizeInBytes = function () {\n return this.stringValue.length;\n };\n PDFNumber.prototype.copyBytesInto = function (buffer, offset) {\n offset += copyStringIntoBuffer(this.stringValue, buffer, offset);\n return this.stringValue.length;\n };\n PDFNumber.of = function (value) { return new PDFNumber(value); };\n return PDFNumber;\n}(PDFObject));\nexport default PDFNumber;\n//# sourceMappingURL=PDFNumber.js.map","import { __extends, __spreadArrays } from \"tslib\";\nimport PDFNumber from \"./PDFNumber\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { PDFArrayIsNotRectangleError } from \"../errors\";\nvar PDFArray = /** @class */ (function (_super) {\n __extends(PDFArray, _super);\n function PDFArray(context) {\n var _this = _super.call(this) || this;\n _this.array = [];\n _this.context = context;\n return _this;\n }\n PDFArray.prototype.size = function () {\n return this.array.length;\n };\n PDFArray.prototype.push = function (object) {\n this.array.push(object);\n };\n PDFArray.prototype.insert = function (index, object) {\n this.array.splice(index, 0, object);\n };\n PDFArray.prototype.indexOf = function (object) {\n var index = this.array.indexOf(object);\n return index === -1 ? 
undefined : index;\n };\n PDFArray.prototype.remove = function (index) {\n this.array.splice(index, 1);\n };\n PDFArray.prototype.set = function (idx, object) {\n this.array[idx] = object;\n };\n PDFArray.prototype.get = function (index) {\n return this.array[index];\n };\n PDFArray.prototype.lookupMaybe = function (index) {\n var _a;\n var types = [];\n for (var _i = 1; _i < arguments.length; _i++) {\n types[_i - 1] = arguments[_i];\n }\n return (_a = this.context).lookupMaybe.apply(_a, __spreadArrays([this.get(index)], types));\n };\n PDFArray.prototype.lookup = function (index) {\n var _a;\n var types = [];\n for (var _i = 1; _i < arguments.length; _i++) {\n types[_i - 1] = arguments[_i];\n }\n return (_a = this.context).lookup.apply(_a, __spreadArrays([this.get(index)], types));\n };\n PDFArray.prototype.asRectangle = function () {\n if (this.size() !== 4)\n throw new PDFArrayIsNotRectangleError(this.size());\n var lowerLeftX = this.lookup(0, PDFNumber).asNumber();\n var lowerLeftY = this.lookup(1, PDFNumber).asNumber();\n var upperRightX = this.lookup(2, PDFNumber).asNumber();\n var upperRightY = this.lookup(3, PDFNumber).asNumber();\n var x = lowerLeftX;\n var y = lowerLeftY;\n var width = upperRightX - lowerLeftX;\n var height = upperRightY - lowerLeftY;\n return { x: x, y: y, width: width, height: height };\n };\n PDFArray.prototype.asArray = function () {\n return this.array.slice();\n };\n PDFArray.prototype.clone = function (context) {\n var clone = PDFArray.withContext(context || this.context);\n for (var idx = 0, len = this.size(); idx < len; idx++) {\n clone.push(this.array[idx]);\n }\n return clone;\n };\n PDFArray.prototype.toString = function () {\n var arrayString = '[ ';\n for (var idx = 0, len = this.size(); idx < len; idx++) {\n arrayString += this.get(idx).toString();\n arrayString += ' ';\n }\n arrayString += ']';\n return arrayString;\n };\n PDFArray.prototype.sizeInBytes = function () {\n var size = 3;\n for (var idx = 0, len = this.size(); idx < len; idx++) {\n size += this.get(idx).sizeInBytes() + 1;\n }\n return size;\n };\n PDFArray.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n buffer[offset++] = CharCodes.LeftSquareBracket;\n buffer[offset++] = CharCodes.Space;\n for (var idx = 0, len = this.size(); idx < len; idx++) {\n offset += this.get(idx).copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Space;\n }\n buffer[offset++] = CharCodes.RightSquareBracket;\n return offset - initialOffset;\n };\n PDFArray.prototype.scalePDFNumbers = function (x, y) {\n for (var idx = 0, len = this.size(); idx < len; idx++) {\n var el = this.lookup(idx);\n if (el instanceof PDFNumber) {\n var factor = idx % 2 === 0 ? 
x : y;\n this.set(idx, PDFNumber.of(el.asNumber() * factor));\n }\n }\n };\n PDFArray.withContext = function (context) { return new PDFArray(context); };\n return PDFArray;\n}(PDFObject));\nexport default PDFArray;\n//# sourceMappingURL=PDFArray.js.map","import { __extends } from \"tslib\";\nimport { PrivateConstructorError } from \"../errors\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nvar ENFORCER = {};\nvar PDFBool = /** @class */ (function (_super) {\n __extends(PDFBool, _super);\n function PDFBool(enforcer, value) {\n var _this = this;\n if (enforcer !== ENFORCER)\n throw new PrivateConstructorError('PDFBool');\n _this = _super.call(this) || this;\n _this.value = value;\n return _this;\n }\n PDFBool.prototype.asBoolean = function () {\n return this.value;\n };\n PDFBool.prototype.clone = function () {\n return this;\n };\n PDFBool.prototype.toString = function () {\n return String(this.value);\n };\n PDFBool.prototype.sizeInBytes = function () {\n return this.value ? 4 : 5;\n };\n PDFBool.prototype.copyBytesInto = function (buffer, offset) {\n if (this.value) {\n buffer[offset++] = CharCodes.t;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.u;\n buffer[offset++] = CharCodes.e;\n return 4;\n }\n else {\n buffer[offset++] = CharCodes.f;\n buffer[offset++] = CharCodes.a;\n buffer[offset++] = CharCodes.l;\n buffer[offset++] = CharCodes.s;\n buffer[offset++] = CharCodes.e;\n return 5;\n }\n };\n PDFBool.True = new PDFBool(ENFORCER, true);\n PDFBool.False = new PDFBool(ENFORCER, false);\n return PDFBool;\n}(PDFObject));\nexport default PDFBool;\n//# sourceMappingURL=PDFBool.js.map","import CharCodes from \"./CharCodes\";\nexport var IsDelimiter = new Uint8Array(256);\nIsDelimiter[CharCodes.LeftParen] = 1;\nIsDelimiter[CharCodes.RightParen] = 1;\nIsDelimiter[CharCodes.LessThan] = 1;\nIsDelimiter[CharCodes.GreaterThan] = 1;\nIsDelimiter[CharCodes.LeftSquareBracket] = 1;\nIsDelimiter[CharCodes.RightSquareBracket] = 1;\nIsDelimiter[CharCodes.LeftCurly] = 1;\nIsDelimiter[CharCodes.RightCurly] = 1;\nIsDelimiter[CharCodes.ForwardSlash] = 1;\nIsDelimiter[CharCodes.Percent] = 1;\n//# sourceMappingURL=Delimiters.js.map","import CharCodes from \"./CharCodes\";\nexport var IsWhitespace = new Uint8Array(256);\nIsWhitespace[CharCodes.Null] = 1;\nIsWhitespace[CharCodes.Tab] = 1;\nIsWhitespace[CharCodes.Newline] = 1;\nIsWhitespace[CharCodes.FormFeed] = 1;\nIsWhitespace[CharCodes.CarriageReturn] = 1;\nIsWhitespace[CharCodes.Space] = 1;\n//# sourceMappingURL=Whitespace.js.map","import CharCodes from \"./CharCodes\";\nimport { IsDelimiter } from \"./Delimiters\";\nimport { IsWhitespace } from \"./Whitespace\";\nexport var IsIrregular = new Uint8Array(256);\nfor (var idx = 0, len = 256; idx < len; idx++) {\n IsIrregular[idx] = IsWhitespace[idx] || IsDelimiter[idx] ? 
1 : 0;\n}\nIsIrregular[CharCodes.Hash] = 1;\n//# sourceMappingURL=Irregular.js.map","import { __extends } from \"tslib\";\nimport { PrivateConstructorError } from \"../errors\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { IsIrregular } from \"../syntax/Irregular\";\nimport { charFromHexCode, copyStringIntoBuffer, toCharCode, toHexString, } from \"../../utils\";\nvar decodeName = function (name) {\n return name.replace(/#([\\dABCDEF]{2})/g, function (_, hex) { return charFromHexCode(hex); });\n};\nvar isRegularChar = function (charCode) {\n return charCode >= CharCodes.ExclamationPoint &&\n charCode <= CharCodes.Tilde &&\n !IsIrregular[charCode];\n};\nvar ENFORCER = {};\nvar pool = new Map();\nvar PDFName = /** @class */ (function (_super) {\n __extends(PDFName, _super);\n function PDFName(enforcer, name) {\n var _this = this;\n if (enforcer !== ENFORCER)\n throw new PrivateConstructorError('PDFName');\n _this = _super.call(this) || this;\n var encodedName = '/';\n for (var idx = 0, len = name.length; idx < len; idx++) {\n var character = name[idx];\n var code = toCharCode(character);\n encodedName += isRegularChar(code) ? character : \"#\" + toHexString(code);\n }\n _this.encodedName = encodedName;\n return _this;\n }\n PDFName.prototype.asBytes = function () {\n var bytes = [];\n var hex = '';\n var escaped = false;\n var pushByte = function (byte) {\n if (byte !== undefined)\n bytes.push(byte);\n escaped = false;\n };\n for (var idx = 1, len = this.encodedName.length; idx < len; idx++) {\n var char = this.encodedName[idx];\n var byte = toCharCode(char);\n var nextChar = this.encodedName[idx + 1];\n if (!escaped) {\n if (byte === CharCodes.Hash)\n escaped = true;\n else\n pushByte(byte);\n }\n else {\n if ((byte >= CharCodes.Zero && byte <= CharCodes.Nine) ||\n (byte >= CharCodes.a && byte <= CharCodes.f) ||\n (byte >= CharCodes.A && byte <= CharCodes.F)) {\n hex += char;\n if (hex.length === 2 ||\n !((nextChar >= '0' && nextChar <= '9') ||\n (nextChar >= 'a' && nextChar <= 'f') ||\n (nextChar >= 'A' && nextChar <= 'F'))) {\n pushByte(parseInt(hex, 16));\n hex = '';\n }\n }\n else {\n pushByte(byte);\n }\n }\n }\n return new Uint8Array(bytes);\n };\n // TODO: This should probably use `utf8Decode()`\n // TODO: Polyfill Array.from?\n PDFName.prototype.decodeText = function () {\n var bytes = this.asBytes();\n return String.fromCharCode.apply(String, Array.from(bytes));\n };\n PDFName.prototype.asString = function () {\n return this.encodedName;\n };\n /** @deprecated in favor of [[PDFName.asString]] */\n PDFName.prototype.value = function () {\n return this.encodedName;\n };\n PDFName.prototype.clone = function () {\n return this;\n };\n PDFName.prototype.toString = function () {\n return this.encodedName;\n };\n PDFName.prototype.sizeInBytes = function () {\n return this.encodedName.length;\n };\n PDFName.prototype.copyBytesInto = function (buffer, offset) {\n offset += copyStringIntoBuffer(this.encodedName, buffer, offset);\n return this.encodedName.length;\n };\n PDFName.of = function (name) {\n var decodedValue = decodeName(name);\n var instance = pool.get(decodedValue);\n if (!instance) {\n instance = new PDFName(ENFORCER, decodedValue);\n pool.set(decodedValue, instance);\n }\n return instance;\n };\n /* tslint:disable member-ordering */\n PDFName.Length = PDFName.of('Length');\n PDFName.FlateDecode = PDFName.of('FlateDecode');\n PDFName.Resources = PDFName.of('Resources');\n PDFName.Font = PDFName.of('Font');\n PDFName.XObject = 
PDFName.of('XObject');\n PDFName.ExtGState = PDFName.of('ExtGState');\n PDFName.Contents = PDFName.of('Contents');\n PDFName.Type = PDFName.of('Type');\n PDFName.Parent = PDFName.of('Parent');\n PDFName.MediaBox = PDFName.of('MediaBox');\n PDFName.Page = PDFName.of('Page');\n PDFName.Annots = PDFName.of('Annots');\n PDFName.TrimBox = PDFName.of('TrimBox');\n PDFName.ArtBox = PDFName.of('ArtBox');\n PDFName.BleedBox = PDFName.of('BleedBox');\n PDFName.CropBox = PDFName.of('CropBox');\n PDFName.Rotate = PDFName.of('Rotate');\n PDFName.Title = PDFName.of('Title');\n PDFName.Author = PDFName.of('Author');\n PDFName.Subject = PDFName.of('Subject');\n PDFName.Creator = PDFName.of('Creator');\n PDFName.Keywords = PDFName.of('Keywords');\n PDFName.Producer = PDFName.of('Producer');\n PDFName.CreationDate = PDFName.of('CreationDate');\n PDFName.ModDate = PDFName.of('ModDate');\n return PDFName;\n}(PDFObject));\nexport default PDFName;\n//# sourceMappingURL=PDFName.js.map","var PDFOperatorNames;\n(function (PDFOperatorNames) {\n // Non Stroking Color Operators\n PDFOperatorNames[\"NonStrokingColor\"] = \"sc\";\n PDFOperatorNames[\"NonStrokingColorN\"] = \"scn\";\n PDFOperatorNames[\"NonStrokingColorRgb\"] = \"rg\";\n PDFOperatorNames[\"NonStrokingColorGray\"] = \"g\";\n PDFOperatorNames[\"NonStrokingColorCmyk\"] = \"k\";\n PDFOperatorNames[\"NonStrokingColorspace\"] = \"cs\";\n // Stroking Color Operators\n PDFOperatorNames[\"StrokingColor\"] = \"SC\";\n PDFOperatorNames[\"StrokingColorN\"] = \"SCN\";\n PDFOperatorNames[\"StrokingColorRgb\"] = \"RG\";\n PDFOperatorNames[\"StrokingColorGray\"] = \"G\";\n PDFOperatorNames[\"StrokingColorCmyk\"] = \"K\";\n PDFOperatorNames[\"StrokingColorspace\"] = \"CS\";\n // Marked Content Operators\n PDFOperatorNames[\"BeginMarkedContentSequence\"] = \"BDC\";\n PDFOperatorNames[\"BeginMarkedContent\"] = \"BMC\";\n PDFOperatorNames[\"EndMarkedContent\"] = \"EMC\";\n PDFOperatorNames[\"MarkedContentPointWithProps\"] = \"DP\";\n PDFOperatorNames[\"MarkedContentPoint\"] = \"MP\";\n PDFOperatorNames[\"DrawObject\"] = \"Do\";\n // Graphics State Operators\n PDFOperatorNames[\"ConcatTransformationMatrix\"] = \"cm\";\n PDFOperatorNames[\"PopGraphicsState\"] = \"Q\";\n PDFOperatorNames[\"PushGraphicsState\"] = \"q\";\n PDFOperatorNames[\"SetFlatness\"] = \"i\";\n PDFOperatorNames[\"SetGraphicsStateParams\"] = \"gs\";\n PDFOperatorNames[\"SetLineCapStyle\"] = \"J\";\n PDFOperatorNames[\"SetLineDashPattern\"] = \"d\";\n PDFOperatorNames[\"SetLineJoinStyle\"] = \"j\";\n PDFOperatorNames[\"SetLineMiterLimit\"] = \"M\";\n PDFOperatorNames[\"SetLineWidth\"] = \"w\";\n PDFOperatorNames[\"SetTextMatrix\"] = \"Tm\";\n PDFOperatorNames[\"SetRenderingIntent\"] = \"ri\";\n // Graphics Operators\n PDFOperatorNames[\"AppendRectangle\"] = \"re\";\n PDFOperatorNames[\"BeginInlineImage\"] = \"BI\";\n PDFOperatorNames[\"BeginInlineImageData\"] = \"ID\";\n PDFOperatorNames[\"EndInlineImage\"] = \"EI\";\n PDFOperatorNames[\"ClipEvenOdd\"] = \"W*\";\n PDFOperatorNames[\"ClipNonZero\"] = \"W\";\n PDFOperatorNames[\"CloseAndStroke\"] = \"s\";\n PDFOperatorNames[\"CloseFillEvenOddAndStroke\"] = \"b*\";\n PDFOperatorNames[\"CloseFillNonZeroAndStroke\"] = \"b\";\n PDFOperatorNames[\"ClosePath\"] = \"h\";\n PDFOperatorNames[\"AppendBezierCurve\"] = \"c\";\n PDFOperatorNames[\"CurveToReplicateFinalPoint\"] = \"y\";\n PDFOperatorNames[\"CurveToReplicateInitialPoint\"] = \"v\";\n PDFOperatorNames[\"EndPath\"] = \"n\";\n PDFOperatorNames[\"FillEvenOddAndStroke\"] = \"B*\";\n 
PDFOperatorNames[\"FillEvenOdd\"] = \"f*\";\n PDFOperatorNames[\"FillNonZeroAndStroke\"] = \"B\";\n PDFOperatorNames[\"FillNonZero\"] = \"f\";\n PDFOperatorNames[\"LegacyFillNonZero\"] = \"F\";\n PDFOperatorNames[\"LineTo\"] = \"l\";\n PDFOperatorNames[\"MoveTo\"] = \"m\";\n PDFOperatorNames[\"ShadingFill\"] = \"sh\";\n PDFOperatorNames[\"StrokePath\"] = \"S\";\n // Text Operators\n PDFOperatorNames[\"BeginText\"] = \"BT\";\n PDFOperatorNames[\"EndText\"] = \"ET\";\n PDFOperatorNames[\"MoveText\"] = \"Td\";\n PDFOperatorNames[\"MoveTextSetLeading\"] = \"TD\";\n PDFOperatorNames[\"NextLine\"] = \"T*\";\n PDFOperatorNames[\"SetCharacterSpacing\"] = \"Tc\";\n PDFOperatorNames[\"SetFontAndSize\"] = \"Tf\";\n PDFOperatorNames[\"SetTextHorizontalScaling\"] = \"Tz\";\n PDFOperatorNames[\"SetTextLineHeight\"] = \"TL\";\n PDFOperatorNames[\"SetTextRenderingMode\"] = \"Tr\";\n PDFOperatorNames[\"SetTextRise\"] = \"Ts\";\n PDFOperatorNames[\"SetWordSpacing\"] = \"Tw\";\n PDFOperatorNames[\"ShowText\"] = \"Tj\";\n PDFOperatorNames[\"ShowTextAdjusted\"] = \"TJ\";\n PDFOperatorNames[\"ShowTextLine\"] = \"'\";\n PDFOperatorNames[\"ShowTextLineAndSpace\"] = \"\\\"\";\n // Type3 Font Operators\n PDFOperatorNames[\"Type3D0\"] = \"d0\";\n PDFOperatorNames[\"Type3D1\"] = \"d1\";\n // Compatibility Section Operators\n PDFOperatorNames[\"BeginCompatibilitySection\"] = \"BX\";\n PDFOperatorNames[\"EndCompatibilitySection\"] = \"EX\";\n})(PDFOperatorNames || (PDFOperatorNames = {}));\nexport default PDFOperatorNames;\n//# sourceMappingURL=PDFOperatorNames.js.map","import { __extends } from \"tslib\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nvar PDFNull = /** @class */ (function (_super) {\n __extends(PDFNull, _super);\n function PDFNull() {\n return _super !== null && _super.apply(this, arguments) || this;\n }\n PDFNull.prototype.asNull = function () {\n return null;\n };\n PDFNull.prototype.clone = function () {\n return this;\n };\n PDFNull.prototype.toString = function () {\n return 'null';\n };\n PDFNull.prototype.sizeInBytes = function () {\n return 4;\n };\n PDFNull.prototype.copyBytesInto = function (buffer, offset) {\n buffer[offset++] = CharCodes.n;\n buffer[offset++] = CharCodes.u;\n buffer[offset++] = CharCodes.l;\n buffer[offset++] = CharCodes.l;\n return 4;\n };\n return PDFNull;\n}(PDFObject));\nexport default new PDFNull();\n//# sourceMappingURL=PDFNull.js.map","import { __extends, __spreadArrays } from \"tslib\";\nimport PDFName from \"./PDFName\";\nimport PDFNull from \"./PDFNull\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nvar PDFDict = /** @class */ (function (_super) {\n __extends(PDFDict, _super);\n function PDFDict(map, context) {\n var _this = _super.call(this) || this;\n _this.dict = map;\n _this.context = context;\n return _this;\n }\n PDFDict.prototype.keys = function () {\n return Array.from(this.dict.keys());\n };\n PDFDict.prototype.values = function () {\n return Array.from(this.dict.values());\n };\n PDFDict.prototype.entries = function () {\n return Array.from(this.dict.entries());\n };\n PDFDict.prototype.set = function (key, value) {\n this.dict.set(key, value);\n };\n PDFDict.prototype.get = function (key, \n // TODO: `preservePDFNull` is for backwards compatibility. 
Should be\n // removed in next breaking API change.\n preservePDFNull) {\n if (preservePDFNull === void 0) { preservePDFNull = false; }\n var value = this.dict.get(key);\n if (value === PDFNull && !preservePDFNull)\n return undefined;\n return value;\n };\n PDFDict.prototype.has = function (key) {\n var value = this.dict.get(key);\n return value !== undefined && value !== PDFNull;\n };\n PDFDict.prototype.lookupMaybe = function (key) {\n var _a;\n var types = [];\n for (var _i = 1; _i < arguments.length; _i++) {\n types[_i - 1] = arguments[_i];\n }\n // TODO: `preservePDFNull` is for backwards compatibility. Should be\n // removed in next breaking API change.\n var preservePDFNull = types.includes(PDFNull);\n var value = (_a = this.context).lookupMaybe.apply(_a, __spreadArrays([this.get(key, preservePDFNull)], types));\n if (value === PDFNull && !preservePDFNull)\n return undefined;\n return value;\n };\n PDFDict.prototype.lookup = function (key) {\n var _a;\n var types = [];\n for (var _i = 1; _i < arguments.length; _i++) {\n types[_i - 1] = arguments[_i];\n }\n // TODO: `preservePDFNull` is for backwards compatibility. Should be\n // removed in next breaking API change.\n var preservePDFNull = types.includes(PDFNull);\n var value = (_a = this.context).lookup.apply(_a, __spreadArrays([this.get(key, preservePDFNull)], types));\n if (value === PDFNull && !preservePDFNull)\n return undefined;\n return value;\n };\n PDFDict.prototype.delete = function (key) {\n return this.dict.delete(key);\n };\n PDFDict.prototype.asMap = function () {\n return new Map(this.dict);\n };\n /** Generate a random key that doesn't exist in current key set */\n PDFDict.prototype.uniqueKey = function (tag) {\n if (tag === void 0) { tag = ''; }\n var existingKeys = this.keys();\n var key = PDFName.of(this.context.addRandomSuffix(tag, 10));\n while (existingKeys.includes(key)) {\n key = PDFName.of(this.context.addRandomSuffix(tag, 10));\n }\n return key;\n };\n PDFDict.prototype.clone = function (context) {\n var clone = PDFDict.withContext(context || this.context);\n var entries = this.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n clone.set(key, value);\n }\n return clone;\n };\n PDFDict.prototype.toString = function () {\n var dictString = '<<\\n';\n var entries = this.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n dictString += key.toString() + ' ' + value.toString() + '\\n';\n }\n dictString += '>>';\n return dictString;\n };\n PDFDict.prototype.sizeInBytes = function () {\n var size = 5;\n var entries = this.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n size += key.sizeInBytes() + value.sizeInBytes() + 2;\n }\n return size;\n };\n PDFDict.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n buffer[offset++] = CharCodes.LessThan;\n buffer[offset++] = CharCodes.LessThan;\n buffer[offset++] = CharCodes.Newline;\n var entries = this.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n offset += key.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Space;\n offset += value.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n }\n buffer[offset++] = CharCodes.GreaterThan;\n buffer[offset++] = CharCodes.GreaterThan;\n return offset - 
initialOffset;\n };\n PDFDict.withContext = function (context) { return new PDFDict(new Map(), context); };\n PDFDict.fromMapWithContext = function (map, context) {\n return new PDFDict(map, context);\n };\n return PDFDict;\n}(PDFObject));\nexport default PDFDict;\n//# sourceMappingURL=PDFDict.js.map","import { __extends } from \"tslib\";\nimport { MethodNotImplementedError } from \"../errors\";\nimport PDFName from \"./PDFName\";\nimport PDFNumber from \"./PDFNumber\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nvar PDFStream = /** @class */ (function (_super) {\n __extends(PDFStream, _super);\n function PDFStream(dict) {\n var _this = _super.call(this) || this;\n _this.dict = dict;\n return _this;\n }\n PDFStream.prototype.clone = function (_context) {\n throw new MethodNotImplementedError(this.constructor.name, 'clone');\n };\n PDFStream.prototype.getContentsString = function () {\n throw new MethodNotImplementedError(this.constructor.name, 'getContentsString');\n };\n PDFStream.prototype.getContents = function () {\n throw new MethodNotImplementedError(this.constructor.name, 'getContents');\n };\n PDFStream.prototype.getContentsSize = function () {\n throw new MethodNotImplementedError(this.constructor.name, 'getContentsSize');\n };\n PDFStream.prototype.updateDict = function () {\n var contentsSize = this.getContentsSize();\n this.dict.set(PDFName.Length, PDFNumber.of(contentsSize));\n };\n PDFStream.prototype.sizeInBytes = function () {\n this.updateDict();\n return this.dict.sizeInBytes() + this.getContentsSize() + 18;\n };\n PDFStream.prototype.toString = function () {\n this.updateDict();\n var streamString = this.dict.toString();\n streamString += '\\nstream\\n';\n streamString += this.getContentsString();\n streamString += '\\nendstream';\n return streamString;\n };\n PDFStream.prototype.copyBytesInto = function (buffer, offset) {\n this.updateDict();\n var initialOffset = offset;\n offset += this.dict.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.s;\n buffer[offset++] = CharCodes.t;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.a;\n buffer[offset++] = CharCodes.m;\n buffer[offset++] = CharCodes.Newline;\n var contents = this.getContents();\n for (var idx = 0, len = contents.length; idx < len; idx++) {\n buffer[offset++] = contents[idx];\n }\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.n;\n buffer[offset++] = CharCodes.d;\n buffer[offset++] = CharCodes.s;\n buffer[offset++] = CharCodes.t;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.a;\n buffer[offset++] = CharCodes.m;\n return offset - initialOffset;\n };\n return PDFStream;\n}(PDFObject));\nexport default PDFStream;\n//# sourceMappingURL=PDFStream.js.map","import { __extends } from \"tslib\";\nimport PDFStream from \"./PDFStream\";\nimport { arrayAsString } from \"../../utils\";\nvar PDFRawStream = /** @class */ (function (_super) {\n __extends(PDFRawStream, _super);\n function PDFRawStream(dict, contents) {\n var _this = _super.call(this, dict) || this;\n _this.contents = contents;\n return _this;\n }\n PDFRawStream.prototype.asUint8Array = function () {\n return this.contents.slice();\n };\n PDFRawStream.prototype.clone = function (context) {\n return PDFRawStream.of(this.dict.clone(context), this.contents.slice());\n };\n 
PDFRawStream.prototype.getContentsString = function () {\n return arrayAsString(this.contents);\n };\n PDFRawStream.prototype.getContents = function () {\n return this.contents;\n };\n PDFRawStream.prototype.getContentsSize = function () {\n return this.contents.length;\n };\n PDFRawStream.of = function (dict, contents) {\n return new PDFRawStream(dict, contents);\n };\n return PDFRawStream;\n}(PDFStream));\nexport default PDFRawStream;\n//# sourceMappingURL=PDFRawStream.js.map","import { __extends } from \"tslib\";\nimport { PrivateConstructorError } from \"../errors\";\nimport PDFObject from \"./PDFObject\";\nimport { copyStringIntoBuffer } from \"../../utils\";\nvar ENFORCER = {};\nvar pool = new Map();\nvar PDFRef = /** @class */ (function (_super) {\n __extends(PDFRef, _super);\n function PDFRef(enforcer, objectNumber, generationNumber) {\n var _this = this;\n if (enforcer !== ENFORCER)\n throw new PrivateConstructorError('PDFRef');\n _this = _super.call(this) || this;\n _this.objectNumber = objectNumber;\n _this.generationNumber = generationNumber;\n _this.tag = objectNumber + \" \" + generationNumber + \" R\";\n return _this;\n }\n PDFRef.prototype.clone = function () {\n return this;\n };\n PDFRef.prototype.toString = function () {\n return this.tag;\n };\n PDFRef.prototype.sizeInBytes = function () {\n return this.tag.length;\n };\n PDFRef.prototype.copyBytesInto = function (buffer, offset) {\n offset += copyStringIntoBuffer(this.tag, buffer, offset);\n return this.tag.length;\n };\n PDFRef.of = function (objectNumber, generationNumber) {\n if (generationNumber === void 0) { generationNumber = 0; }\n var tag = objectNumber + \" \" + generationNumber + \" R\";\n var instance = pool.get(tag);\n if (!instance) {\n instance = new PDFRef(ENFORCER, objectNumber, generationNumber);\n pool.set(tag, instance);\n }\n return instance;\n };\n return PDFRef;\n}(PDFObject));\nexport default PDFRef;\n//# sourceMappingURL=PDFRef.js.map","import PDFObject from \"../objects/PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { copyStringIntoBuffer } from \"../../utils\";\nvar PDFOperator = /** @class */ (function () {\n function PDFOperator(name, args) {\n this.name = name;\n this.args = args || [];\n }\n PDFOperator.prototype.clone = function (context) {\n var args = new Array(this.args.length);\n for (var idx = 0, len = args.length; idx < len; idx++) {\n var arg = this.args[idx];\n args[idx] = arg instanceof PDFObject ? arg.clone(context) : arg;\n }\n return PDFOperator.of(this.name, args);\n };\n PDFOperator.prototype.toString = function () {\n var value = '';\n for (var idx = 0, len = this.args.length; idx < len; idx++) {\n value += String(this.args[idx]) + ' ';\n }\n value += this.name;\n return value;\n };\n PDFOperator.prototype.sizeInBytes = function () {\n var size = 0;\n for (var idx = 0, len = this.args.length; idx < len; idx++) {\n var arg = this.args[idx];\n size += (arg instanceof PDFObject ? 
arg.sizeInBytes() : arg.length) + 1;\n }\n size += this.name.length;\n return size;\n };\n PDFOperator.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n for (var idx = 0, len = this.args.length; idx < len; idx++) {\n var arg = this.args[idx];\n if (arg instanceof PDFObject) {\n offset += arg.copyBytesInto(buffer, offset);\n }\n else {\n offset += copyStringIntoBuffer(arg, buffer, offset);\n }\n buffer[offset++] = CharCodes.Space;\n }\n offset += copyStringIntoBuffer(this.name, buffer, offset);\n return offset - initialOffset;\n };\n PDFOperator.of = function (name, args) {\n return new PDFOperator(name, args);\n };\n return PDFOperator;\n}());\nexport default PDFOperator;\n//# sourceMappingURL=PDFOperator.js.map","import { __extends } from \"tslib\";\nimport PDFName from \"../objects/PDFName\";\nimport PDFRef from \"../objects/PDFRef\";\nimport PDFFlateStream from \"./PDFFlateStream\";\nimport { bytesFor, Cache, reverseArray, sizeInBytes, sum } from \"../../utils\";\nexport var EntryType;\n(function (EntryType) {\n EntryType[EntryType[\"Deleted\"] = 0] = \"Deleted\";\n EntryType[EntryType[\"Uncompressed\"] = 1] = \"Uncompressed\";\n EntryType[EntryType[\"Compressed\"] = 2] = \"Compressed\";\n})(EntryType || (EntryType = {}));\n/**\n * Entries should be added using the [[addDeletedEntry]],\n * [[addUncompressedEntry]], and [[addCompressedEntry]] methods\n * **in order of ascending object number**.\n */\nvar PDFCrossRefStream = /** @class */ (function (_super) {\n __extends(PDFCrossRefStream, _super);\n function PDFCrossRefStream(dict, entries, encode) {\n if (encode === void 0) { encode = true; }\n var _this = _super.call(this, dict, encode) || this;\n // Returns an array of integer pairs for each subsection of the cross ref\n // section, where each integer pair represents:\n // firstObjectNumber(OfSection), length(OfSection)\n _this.computeIndex = function () {\n var subsections = [];\n var subsectionLength = 0;\n for (var idx = 0, len = _this.entries.length; idx < len; idx++) {\n var currEntry = _this.entries[idx];\n var prevEntry = _this.entries[idx - 1];\n if (idx === 0) {\n subsections.push(currEntry.ref.objectNumber);\n }\n else if (currEntry.ref.objectNumber - prevEntry.ref.objectNumber > 1) {\n subsections.push(subsectionLength);\n subsections.push(currEntry.ref.objectNumber);\n subsectionLength = 0;\n }\n subsectionLength += 1;\n }\n subsections.push(subsectionLength);\n return subsections;\n };\n _this.computeEntryTuples = function () {\n var entryTuples = new Array(_this.entries.length);\n for (var idx = 0, len = _this.entries.length; idx < len; idx++) {\n var entry = _this.entries[idx];\n if (entry.type === EntryType.Deleted) {\n var type = entry.type, nextFreeObjectNumber = entry.nextFreeObjectNumber, ref = entry.ref;\n entryTuples[idx] = [type, nextFreeObjectNumber, ref.generationNumber];\n }\n if (entry.type === EntryType.Uncompressed) {\n var type = entry.type, offset = entry.offset, ref = entry.ref;\n entryTuples[idx] = [type, offset, ref.generationNumber];\n }\n if (entry.type === EntryType.Compressed) {\n var type = entry.type, objectStreamRef = entry.objectStreamRef, index = entry.index;\n entryTuples[idx] = [type, objectStreamRef.objectNumber, index];\n }\n }\n return entryTuples;\n };\n _this.computeMaxEntryByteWidths = function () {\n var entryTuples = _this.entryTuplesCache.access();\n var widths = [0, 0, 0];\n for (var idx = 0, len = entryTuples.length; idx < len; idx++) {\n var _a = entryTuples[idx], first = _a[0], second = 
_a[1], third = _a[2];\n var firstSize = sizeInBytes(first);\n var secondSize = sizeInBytes(second);\n var thirdSize = sizeInBytes(third);\n if (firstSize > widths[0])\n widths[0] = firstSize;\n if (secondSize > widths[1])\n widths[1] = secondSize;\n if (thirdSize > widths[2])\n widths[2] = thirdSize;\n }\n return widths;\n };\n _this.entries = entries || [];\n _this.entryTuplesCache = Cache.populatedBy(_this.computeEntryTuples);\n _this.maxByteWidthsCache = Cache.populatedBy(_this.computeMaxEntryByteWidths);\n _this.indexCache = Cache.populatedBy(_this.computeIndex);\n dict.set(PDFName.of('Type'), PDFName.of('XRef'));\n return _this;\n }\n PDFCrossRefStream.prototype.addDeletedEntry = function (ref, nextFreeObjectNumber) {\n var type = EntryType.Deleted;\n this.entries.push({ type: type, ref: ref, nextFreeObjectNumber: nextFreeObjectNumber });\n this.entryTuplesCache.invalidate();\n this.maxByteWidthsCache.invalidate();\n this.indexCache.invalidate();\n this.contentsCache.invalidate();\n };\n PDFCrossRefStream.prototype.addUncompressedEntry = function (ref, offset) {\n var type = EntryType.Uncompressed;\n this.entries.push({ type: type, ref: ref, offset: offset });\n this.entryTuplesCache.invalidate();\n this.maxByteWidthsCache.invalidate();\n this.indexCache.invalidate();\n this.contentsCache.invalidate();\n };\n PDFCrossRefStream.prototype.addCompressedEntry = function (ref, objectStreamRef, index) {\n var type = EntryType.Compressed;\n this.entries.push({ type: type, ref: ref, objectStreamRef: objectStreamRef, index: index });\n this.entryTuplesCache.invalidate();\n this.maxByteWidthsCache.invalidate();\n this.indexCache.invalidate();\n this.contentsCache.invalidate();\n };\n PDFCrossRefStream.prototype.clone = function (context) {\n var _a = this, dict = _a.dict, entries = _a.entries, encode = _a.encode;\n return PDFCrossRefStream.of(dict.clone(context), entries.slice(), encode);\n };\n PDFCrossRefStream.prototype.getContentsString = function () {\n var entryTuples = this.entryTuplesCache.access();\n var byteWidths = this.maxByteWidthsCache.access();\n var value = '';\n for (var entryIdx = 0, entriesLen = entryTuples.length; entryIdx < entriesLen; entryIdx++) {\n var _a = entryTuples[entryIdx], first = _a[0], second = _a[1], third = _a[2];\n var firstBytes = reverseArray(bytesFor(first));\n var secondBytes = reverseArray(bytesFor(second));\n var thirdBytes = reverseArray(bytesFor(third));\n for (var idx = byteWidths[0] - 1; idx >= 0; idx--) {\n value += (firstBytes[idx] || 0).toString(2);\n }\n for (var idx = byteWidths[1] - 1; idx >= 0; idx--) {\n value += (secondBytes[idx] || 0).toString(2);\n }\n for (var idx = byteWidths[2] - 1; idx >= 0; idx--) {\n value += (thirdBytes[idx] || 0).toString(2);\n }\n }\n return value;\n };\n PDFCrossRefStream.prototype.getUnencodedContents = function () {\n var entryTuples = this.entryTuplesCache.access();\n var byteWidths = this.maxByteWidthsCache.access();\n var buffer = new Uint8Array(this.getUnencodedContentsSize());\n var offset = 0;\n for (var entryIdx = 0, entriesLen = entryTuples.length; entryIdx < entriesLen; entryIdx++) {\n var _a = entryTuples[entryIdx], first = _a[0], second = _a[1], third = _a[2];\n var firstBytes = reverseArray(bytesFor(first));\n var secondBytes = reverseArray(bytesFor(second));\n var thirdBytes = reverseArray(bytesFor(third));\n for (var idx = byteWidths[0] - 1; idx >= 0; idx--) {\n buffer[offset++] = firstBytes[idx] || 0;\n }\n for (var idx = byteWidths[1] - 1; idx >= 0; idx--) {\n buffer[offset++] = 
secondBytes[idx] || 0;\n }\n for (var idx = byteWidths[2] - 1; idx >= 0; idx--) {\n buffer[offset++] = thirdBytes[idx] || 0;\n }\n }\n return buffer;\n };\n PDFCrossRefStream.prototype.getUnencodedContentsSize = function () {\n var byteWidths = this.maxByteWidthsCache.access();\n var entryWidth = sum(byteWidths);\n return entryWidth * this.entries.length;\n };\n PDFCrossRefStream.prototype.updateDict = function () {\n _super.prototype.updateDict.call(this);\n var byteWidths = this.maxByteWidthsCache.access();\n var index = this.indexCache.access();\n var context = this.dict.context;\n this.dict.set(PDFName.of('W'), context.obj(byteWidths));\n this.dict.set(PDFName.of('Index'), context.obj(index));\n };\n PDFCrossRefStream.create = function (dict, encode) {\n if (encode === void 0) { encode = true; }\n var stream = new PDFCrossRefStream(dict, [], encode);\n stream.addDeletedEntry(PDFRef.of(0, 65535), 0);\n return stream;\n };\n PDFCrossRefStream.of = function (dict, entries, encode) {\n if (encode === void 0) { encode = true; }\n return new PDFCrossRefStream(dict, entries, encode);\n };\n return PDFCrossRefStream;\n}(PDFFlateStream));\nexport default PDFCrossRefStream;\n//# sourceMappingURL=PDFCrossRefStream.js.map","import { __extends } from \"tslib\";\nimport pako from 'pako';\nimport { MethodNotImplementedError } from \"../errors\";\nimport PDFName from \"../objects/PDFName\";\nimport PDFStream from \"../objects/PDFStream\";\nimport { Cache } from \"../../utils\";\nvar PDFFlateStream = /** @class */ (function (_super) {\n __extends(PDFFlateStream, _super);\n function PDFFlateStream(dict, encode) {\n var _this = _super.call(this, dict) || this;\n _this.computeContents = function () {\n var unencodedContents = _this.getUnencodedContents();\n return _this.encode ? 
pako.deflate(unencodedContents) : unencodedContents;\n };\n _this.encode = encode;\n if (encode)\n dict.set(PDFName.of('Filter'), PDFName.of('FlateDecode'));\n _this.contentsCache = Cache.populatedBy(_this.computeContents);\n return _this;\n }\n PDFFlateStream.prototype.getContents = function () {\n return this.contentsCache.access();\n };\n PDFFlateStream.prototype.getContentsSize = function () {\n return this.contentsCache.access().length;\n };\n PDFFlateStream.prototype.getUnencodedContents = function () {\n throw new MethodNotImplementedError(this.constructor.name, 'getUnencodedContents');\n };\n return PDFFlateStream;\n}(PDFStream));\nexport default PDFFlateStream;\n//# sourceMappingURL=PDFFlateStream.js.map","import { __extends } from \"tslib\";\nimport PDFFlateStream from \"./PDFFlateStream\";\nimport CharCodes from \"../syntax/CharCodes\";\nvar PDFContentStream = /** @class */ (function (_super) {\n __extends(PDFContentStream, _super);\n function PDFContentStream(dict, operators, encode) {\n if (encode === void 0) { encode = true; }\n var _this = _super.call(this, dict, encode) || this;\n _this.operators = operators;\n return _this;\n }\n PDFContentStream.prototype.push = function () {\n var _a;\n var operators = [];\n for (var _i = 0; _i < arguments.length; _i++) {\n operators[_i] = arguments[_i];\n }\n (_a = this.operators).push.apply(_a, operators);\n };\n PDFContentStream.prototype.clone = function (context) {\n var operators = new Array(this.operators.length);\n for (var idx = 0, len = this.operators.length; idx < len; idx++) {\n operators[idx] = this.operators[idx].clone(context);\n }\n var _a = this, dict = _a.dict, encode = _a.encode;\n return PDFContentStream.of(dict.clone(context), operators, encode);\n };\n PDFContentStream.prototype.getContentsString = function () {\n var value = '';\n for (var idx = 0, len = this.operators.length; idx < len; idx++) {\n value += this.operators[idx] + \"\\n\";\n }\n return value;\n };\n PDFContentStream.prototype.getUnencodedContents = function () {\n var buffer = new Uint8Array(this.getUnencodedContentsSize());\n var offset = 0;\n for (var idx = 0, len = this.operators.length; idx < len; idx++) {\n offset += this.operators[idx].copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n }\n return buffer;\n };\n PDFContentStream.prototype.getUnencodedContentsSize = function () {\n var size = 0;\n for (var idx = 0, len = this.operators.length; idx < len; idx++) {\n size += this.operators[idx].sizeInBytes() + 1;\n }\n return size;\n };\n PDFContentStream.of = function (dict, operators, encode) {\n if (encode === void 0) { encode = true; }\n return new PDFContentStream(dict, operators, encode);\n };\n return PDFContentStream;\n}(PDFFlateStream));\nexport default PDFContentStream;\n//# sourceMappingURL=PDFContentStream.js.map","/**\n * Generates a pseudo random number. 
Although it is not cryptographically secure\n * and uniformly distributed, it is not a concern for the intended use-case,\n * which is to generate distinct numbers.\n *\n * Credit: https://stackoverflow.com/a/19303725/10254049\n */\nvar SimpleRNG = /** @class */ (function () {\n function SimpleRNG(seed) {\n this.seed = seed;\n }\n SimpleRNG.prototype.nextInt = function () {\n var x = Math.sin(this.seed++) * 10000;\n return x - Math.floor(x);\n };\n SimpleRNG.withSeed = function (seed) { return new SimpleRNG(seed); };\n return SimpleRNG;\n}());\nexport { SimpleRNG };\n//# sourceMappingURL=rng.js.map","import { __assign } from \"tslib\";\nimport pako from 'pako';\nimport PDFHeader from \"./document/PDFHeader\";\nimport { UnexpectedObjectTypeError } from \"./errors\";\nimport PDFArray from \"./objects/PDFArray\";\nimport PDFBool from \"./objects/PDFBool\";\nimport PDFDict from \"./objects/PDFDict\";\nimport PDFName from \"./objects/PDFName\";\nimport PDFNull from \"./objects/PDFNull\";\nimport PDFNumber from \"./objects/PDFNumber\";\nimport PDFObject from \"./objects/PDFObject\";\nimport PDFRawStream from \"./objects/PDFRawStream\";\nimport PDFRef from \"./objects/PDFRef\";\nimport PDFOperator from \"./operators/PDFOperator\";\nimport Ops from \"./operators/PDFOperatorNames\";\nimport PDFContentStream from \"./structures/PDFContentStream\";\nimport { typedArrayFor } from \"../utils\";\nimport { SimpleRNG } from \"../utils/rng\";\nvar byAscendingObjectNumber = function (_a, _b) {\n var a = _a[0];\n var b = _b[0];\n return a.objectNumber - b.objectNumber;\n};\nvar PDFContext = /** @class */ (function () {\n function PDFContext() {\n this.largestObjectNumber = 0;\n this.header = PDFHeader.forVersion(1, 7);\n this.trailerInfo = {};\n this.indirectObjects = new Map();\n this.rng = SimpleRNG.withSeed(1);\n }\n PDFContext.prototype.assign = function (ref, object) {\n this.indirectObjects.set(ref, object);\n if (ref.objectNumber > this.largestObjectNumber) {\n this.largestObjectNumber = ref.objectNumber;\n }\n };\n PDFContext.prototype.nextRef = function () {\n this.largestObjectNumber += 1;\n return PDFRef.of(this.largestObjectNumber);\n };\n PDFContext.prototype.register = function (object) {\n var ref = this.nextRef();\n this.assign(ref, object);\n return ref;\n };\n PDFContext.prototype.delete = function (ref) {\n return this.indirectObjects.delete(ref);\n };\n PDFContext.prototype.lookupMaybe = function (ref) {\n var types = [];\n for (var _i = 1; _i < arguments.length; _i++) {\n types[_i - 1] = arguments[_i];\n }\n // TODO: `preservePDFNull` is for backwards compatibility. Should be\n // removed in next breaking API change.\n var preservePDFNull = types.includes(PDFNull);\n var result = ref instanceof PDFRef ? this.indirectObjects.get(ref) : ref;\n if (!result || (result === PDFNull && !preservePDFNull))\n return undefined;\n for (var idx = 0, len = types.length; idx < len; idx++) {\n var type = types[idx];\n if (type === PDFNull) {\n if (result === PDFNull)\n return result;\n }\n else {\n if (result instanceof type)\n return result;\n }\n }\n throw new UnexpectedObjectTypeError(types, result);\n };\n PDFContext.prototype.lookup = function (ref) {\n var types = [];\n for (var _i = 1; _i < arguments.length; _i++) {\n types[_i - 1] = arguments[_i];\n }\n var result = ref instanceof PDFRef ? 
this.indirectObjects.get(ref) : ref;\n if (types.length === 0)\n return result;\n for (var idx = 0, len = types.length; idx < len; idx++) {\n var type = types[idx];\n if (type === PDFNull) {\n if (result === PDFNull)\n return result;\n }\n else {\n if (result instanceof type)\n return result;\n }\n }\n throw new UnexpectedObjectTypeError(types, result);\n };\n PDFContext.prototype.getObjectRef = function (pdfObject) {\n var entries = Array.from(this.indirectObjects.entries());\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], ref = _a[0], object = _a[1];\n if (object === pdfObject) {\n return ref;\n }\n }\n return undefined;\n };\n PDFContext.prototype.enumerateIndirectObjects = function () {\n return Array.from(this.indirectObjects.entries()).sort(byAscendingObjectNumber);\n };\n PDFContext.prototype.obj = function (literal) {\n if (literal instanceof PDFObject) {\n return literal;\n }\n else if (literal === null || literal === undefined) {\n return PDFNull;\n }\n else if (typeof literal === 'string') {\n return PDFName.of(literal);\n }\n else if (typeof literal === 'number') {\n return PDFNumber.of(literal);\n }\n else if (typeof literal === 'boolean') {\n return literal ? PDFBool.True : PDFBool.False;\n }\n else if (Array.isArray(literal)) {\n var array = PDFArray.withContext(this);\n for (var idx = 0, len = literal.length; idx < len; idx++) {\n array.push(this.obj(literal[idx]));\n }\n return array;\n }\n else {\n var dict = PDFDict.withContext(this);\n var keys = Object.keys(literal);\n for (var idx = 0, len = keys.length; idx < len; idx++) {\n var key = keys[idx];\n var value = literal[key];\n if (value !== undefined)\n dict.set(PDFName.of(key), this.obj(value));\n }\n return dict;\n }\n };\n PDFContext.prototype.stream = function (contents, dict) {\n if (dict === void 0) { dict = {}; }\n return PDFRawStream.of(this.obj(dict), typedArrayFor(contents));\n };\n PDFContext.prototype.flateStream = function (contents, dict) {\n if (dict === void 0) { dict = {}; }\n return this.stream(pako.deflate(typedArrayFor(contents)), __assign(__assign({}, dict), { Filter: 'FlateDecode' }));\n };\n PDFContext.prototype.contentStream = function (operators, dict) {\n if (dict === void 0) { dict = {}; }\n return PDFContentStream.of(this.obj(dict), operators);\n };\n PDFContext.prototype.formXObject = function (operators, dict) {\n if (dict === void 0) { dict = {}; }\n return this.contentStream(operators, __assign(__assign({ BBox: this.obj([0, 0, 0, 0]), Matrix: this.obj([1, 0, 0, 1, 0, 0]) }, dict), { Type: 'XObject', Subtype: 'Form' }));\n };\n /*\n * Reference to PDFContentStream that contains a single PDFOperator: `q`.\n * Used by [[PDFPageLeaf]] instances to ensure that when content streams are\n * added to a modified PDF, they start in the default, unchanged graphics\n * state.\n */\n PDFContext.prototype.getPushGraphicsStateContentStream = function () {\n if (this.pushGraphicsStateContentStreamRef) {\n return this.pushGraphicsStateContentStreamRef;\n }\n var dict = this.obj({});\n var op = PDFOperator.of(Ops.PushGraphicsState);\n var stream = PDFContentStream.of(dict, [op]);\n this.pushGraphicsStateContentStreamRef = this.register(stream);\n return this.pushGraphicsStateContentStreamRef;\n };\n /*\n * Reference to PDFContentStream that contains a single PDFOperator: `Q`.\n * Used by [[PDFPageLeaf]] instances to ensure that when content streams are\n * added to a modified PDF, they start in the default, unchanged graphics\n * state.\n */\n 
PDFContext.prototype.getPopGraphicsStateContentStream = function () {\n if (this.popGraphicsStateContentStreamRef) {\n return this.popGraphicsStateContentStreamRef;\n }\n var dict = this.obj({});\n var op = PDFOperator.of(Ops.PopGraphicsState);\n var stream = PDFContentStream.of(dict, [op]);\n this.popGraphicsStateContentStreamRef = this.register(stream);\n return this.popGraphicsStateContentStreamRef;\n };\n PDFContext.prototype.addRandomSuffix = function (prefix, suffixLength) {\n if (suffixLength === void 0) { suffixLength = 4; }\n return prefix + \"-\" + Math.floor(this.rng.nextInt() * Math.pow(10, suffixLength));\n };\n PDFContext.create = function () { return new PDFContext(); };\n return PDFContext;\n}());\nexport default PDFContext;\n//# sourceMappingURL=PDFContext.js.map","import { __extends } from \"tslib\";\nimport PDFArray from \"../objects/PDFArray\";\nimport PDFDict from \"../objects/PDFDict\";\nimport PDFName from \"../objects/PDFName\";\nimport PDFNumber from \"../objects/PDFNumber\";\nimport PDFStream from \"../objects/PDFStream\";\nvar PDFPageLeaf = /** @class */ (function (_super) {\n __extends(PDFPageLeaf, _super);\n function PDFPageLeaf(map, context, autoNormalizeCTM) {\n if (autoNormalizeCTM === void 0) { autoNormalizeCTM = true; }\n var _this = _super.call(this, map, context) || this;\n _this.normalized = false;\n _this.autoNormalizeCTM = autoNormalizeCTM;\n return _this;\n }\n PDFPageLeaf.prototype.clone = function (context) {\n var clone = PDFPageLeaf.fromMapWithContext(new Map(), context || this.context, this.autoNormalizeCTM);\n var entries = this.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n clone.set(key, value);\n }\n return clone;\n };\n PDFPageLeaf.prototype.Parent = function () {\n return this.lookupMaybe(PDFName.Parent, PDFDict);\n };\n PDFPageLeaf.prototype.Contents = function () {\n return this.lookup(PDFName.of('Contents'));\n };\n PDFPageLeaf.prototype.Annots = function () {\n return this.lookupMaybe(PDFName.Annots, PDFArray);\n };\n PDFPageLeaf.prototype.BleedBox = function () {\n return this.lookupMaybe(PDFName.BleedBox, PDFArray);\n };\n PDFPageLeaf.prototype.TrimBox = function () {\n return this.lookupMaybe(PDFName.TrimBox, PDFArray);\n };\n PDFPageLeaf.prototype.ArtBox = function () {\n return this.lookupMaybe(PDFName.ArtBox, PDFArray);\n };\n PDFPageLeaf.prototype.Resources = function () {\n var dictOrRef = this.getInheritableAttribute(PDFName.Resources);\n return this.context.lookupMaybe(dictOrRef, PDFDict);\n };\n PDFPageLeaf.prototype.MediaBox = function () {\n var arrayOrRef = this.getInheritableAttribute(PDFName.MediaBox);\n return this.context.lookup(arrayOrRef, PDFArray);\n };\n PDFPageLeaf.prototype.CropBox = function () {\n var arrayOrRef = this.getInheritableAttribute(PDFName.CropBox);\n return this.context.lookupMaybe(arrayOrRef, PDFArray);\n };\n PDFPageLeaf.prototype.Rotate = function () {\n var numberOrRef = this.getInheritableAttribute(PDFName.Rotate);\n return this.context.lookupMaybe(numberOrRef, PDFNumber);\n };\n PDFPageLeaf.prototype.getInheritableAttribute = function (name) {\n var attribute;\n this.ascend(function (node) {\n if (!attribute)\n attribute = node.get(name);\n });\n return attribute;\n };\n PDFPageLeaf.prototype.setParent = function (parentRef) {\n this.set(PDFName.Parent, parentRef);\n };\n PDFPageLeaf.prototype.addContentStream = function (contentStreamRef) {\n var Contents = this.normalizedEntries().Contents || 
this.context.obj([]);\n this.set(PDFName.Contents, Contents);\n Contents.push(contentStreamRef);\n };\n PDFPageLeaf.prototype.wrapContentStreams = function (startStream, endStream) {\n var Contents = this.Contents();\n if (Contents instanceof PDFArray) {\n Contents.insert(0, startStream);\n Contents.push(endStream);\n return true;\n }\n return false;\n };\n PDFPageLeaf.prototype.addAnnot = function (annotRef) {\n var Annots = this.normalizedEntries().Annots;\n Annots.push(annotRef);\n };\n PDFPageLeaf.prototype.removeAnnot = function (annotRef) {\n var Annots = this.normalizedEntries().Annots;\n var index = Annots.indexOf(annotRef);\n if (index !== undefined) {\n Annots.remove(index);\n }\n };\n PDFPageLeaf.prototype.setFontDictionary = function (name, fontDictRef) {\n var Font = this.normalizedEntries().Font;\n Font.set(name, fontDictRef);\n };\n PDFPageLeaf.prototype.newFontDictionaryKey = function (tag) {\n var Font = this.normalizedEntries().Font;\n return Font.uniqueKey(tag);\n };\n PDFPageLeaf.prototype.newFontDictionary = function (tag, fontDictRef) {\n var key = this.newFontDictionaryKey(tag);\n this.setFontDictionary(key, fontDictRef);\n return key;\n };\n PDFPageLeaf.prototype.setXObject = function (name, xObjectRef) {\n var XObject = this.normalizedEntries().XObject;\n XObject.set(name, xObjectRef);\n };\n PDFPageLeaf.prototype.newXObjectKey = function (tag) {\n var XObject = this.normalizedEntries().XObject;\n return XObject.uniqueKey(tag);\n };\n PDFPageLeaf.prototype.newXObject = function (tag, xObjectRef) {\n var key = this.newXObjectKey(tag);\n this.setXObject(key, xObjectRef);\n return key;\n };\n PDFPageLeaf.prototype.setExtGState = function (name, extGStateRef) {\n var ExtGState = this.normalizedEntries().ExtGState;\n ExtGState.set(name, extGStateRef);\n };\n PDFPageLeaf.prototype.newExtGStateKey = function (tag) {\n var ExtGState = this.normalizedEntries().ExtGState;\n return ExtGState.uniqueKey(tag);\n };\n PDFPageLeaf.prototype.newExtGState = function (tag, extGStateRef) {\n var key = this.newExtGStateKey(tag);\n this.setExtGState(key, extGStateRef);\n return key;\n };\n PDFPageLeaf.prototype.ascend = function (visitor) {\n visitor(this);\n var Parent = this.Parent();\n if (Parent)\n Parent.ascend(visitor);\n };\n PDFPageLeaf.prototype.normalize = function () {\n if (this.normalized)\n return;\n var context = this.context;\n var contentsRef = this.get(PDFName.Contents);\n var contents = this.context.lookup(contentsRef);\n if (contents instanceof PDFStream) {\n this.set(PDFName.Contents, context.obj([contentsRef]));\n }\n if (this.autoNormalizeCTM) {\n this.wrapContentStreams(this.context.getPushGraphicsStateContentStream(), this.context.getPopGraphicsStateContentStream());\n }\n // TODO: Clone `Resources` if it is inherited\n var dictOrRef = this.getInheritableAttribute(PDFName.Resources);\n var Resources = context.lookupMaybe(dictOrRef, PDFDict) || context.obj({});\n this.set(PDFName.Resources, Resources);\n // TODO: Clone `Font` if it is inherited\n var Font = Resources.lookupMaybe(PDFName.Font, PDFDict) || context.obj({});\n Resources.set(PDFName.Font, Font);\n // TODO: Clone `XObject` if it is inherited\n var XObject = Resources.lookupMaybe(PDFName.XObject, PDFDict) || context.obj({});\n Resources.set(PDFName.XObject, XObject);\n // TODO: Clone `ExtGState` if it is inherited\n var ExtGState = Resources.lookupMaybe(PDFName.ExtGState, PDFDict) || context.obj({});\n Resources.set(PDFName.ExtGState, ExtGState);\n var Annots = this.Annots() || context.obj([]);\n 
this.set(PDFName.Annots, Annots);\n this.normalized = true;\n };\n PDFPageLeaf.prototype.normalizedEntries = function () {\n this.normalize();\n var Annots = this.Annots();\n var Resources = this.Resources();\n var Contents = this.Contents();\n return {\n Annots: Annots,\n Resources: Resources,\n Contents: Contents,\n Font: Resources.lookup(PDFName.Font, PDFDict),\n XObject: Resources.lookup(PDFName.XObject, PDFDict),\n ExtGState: Resources.lookup(PDFName.ExtGState, PDFDict),\n };\n };\n PDFPageLeaf.InheritableEntries = [\n 'Resources',\n 'MediaBox',\n 'CropBox',\n 'Rotate',\n ];\n PDFPageLeaf.withContextAndParent = function (context, parent) {\n var dict = new Map();\n dict.set(PDFName.Type, PDFName.Page);\n dict.set(PDFName.Parent, parent);\n dict.set(PDFName.Resources, context.obj({}));\n dict.set(PDFName.MediaBox, context.obj([0, 0, 612, 792]));\n return new PDFPageLeaf(dict, context, false);\n };\n PDFPageLeaf.fromMapWithContext = function (map, context, autoNormalizeCTM) {\n if (autoNormalizeCTM === void 0) { autoNormalizeCTM = true; }\n return new PDFPageLeaf(map, context, autoNormalizeCTM);\n };\n return PDFPageLeaf;\n}(PDFDict));\nexport default PDFPageLeaf;\n//# sourceMappingURL=PDFPageLeaf.js.map","import PDFArray from \"./objects/PDFArray\";\nimport PDFDict from \"./objects/PDFDict\";\nimport PDFName from \"./objects/PDFName\";\nimport PDFRef from \"./objects/PDFRef\";\nimport PDFStream from \"./objects/PDFStream\";\nimport PDFPageLeaf from \"./structures/PDFPageLeaf\";\n/**\n * PDFObjectCopier copies PDFObjects from a src context to a dest context.\n * The primary use case for this is to copy pages between PDFs.\n *\n * _Copying_ an object with a PDFObjectCopier is different from _cloning_ an\n * object with its [[PDFObject.clone]] method:\n *\n * ```\n * const src: PDFContext = ...\n * const dest: PDFContext = ...\n * const originalObject: PDFObject = ...\n * const copiedObject = PDFObjectCopier.for(src, dest).copy(originalObject);\n * const clonedObject = originalObject.clone();\n * ```\n *\n * Copying an object is equivalent to cloning it and then copying over any other\n * objects that it references. Note that only dictionaries, arrays, and streams\n * (or structures build from them) can contain indirect references to other\n * objects. Copying a PDFObject that is not a dictionary, array, or stream is\n * supported, but is equivalent to cloning it.\n */\nvar PDFObjectCopier = /** @class */ (function () {\n function PDFObjectCopier(src, dest) {\n var _this = this;\n this.traversedObjects = new Map();\n // prettier-ignore\n this.copy = function (object) { return (object instanceof PDFPageLeaf ? _this.copyPDFPage(object)\n : object instanceof PDFDict ? _this.copyPDFDict(object)\n : object instanceof PDFArray ? _this.copyPDFArray(object)\n : object instanceof PDFStream ? _this.copyPDFStream(object)\n : object instanceof PDFRef ? 
_this.copyPDFIndirectObject(object)\n : object.clone()); };\n this.copyPDFPage = function (originalPage) {\n var clonedPage = originalPage.clone();\n // Move any entries that the originalPage is inheriting from its parent\n // tree nodes directly into originalPage so they are preserved during\n // the copy.\n var InheritableEntries = PDFPageLeaf.InheritableEntries;\n for (var idx = 0, len = InheritableEntries.length; idx < len; idx++) {\n var key = PDFName.of(InheritableEntries[idx]);\n var value = clonedPage.getInheritableAttribute(key);\n if (!clonedPage.get(key) && value)\n clonedPage.set(key, value);\n }\n // Remove the parent reference to prevent the whole donor document's page\n // tree from being copied when we only need a single page.\n clonedPage.delete(PDFName.of('Parent'));\n return _this.copyPDFDict(clonedPage);\n };\n this.copyPDFDict = function (originalDict) {\n if (_this.traversedObjects.has(originalDict)) {\n return _this.traversedObjects.get(originalDict);\n }\n var clonedDict = originalDict.clone(_this.dest);\n _this.traversedObjects.set(originalDict, clonedDict);\n var entries = originalDict.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n clonedDict.set(key, _this.copy(value));\n }\n return clonedDict;\n };\n this.copyPDFArray = function (originalArray) {\n if (_this.traversedObjects.has(originalArray)) {\n return _this.traversedObjects.get(originalArray);\n }\n var clonedArray = originalArray.clone(_this.dest);\n _this.traversedObjects.set(originalArray, clonedArray);\n for (var idx = 0, len = originalArray.size(); idx < len; idx++) {\n var value = originalArray.get(idx);\n clonedArray.set(idx, _this.copy(value));\n }\n return clonedArray;\n };\n this.copyPDFStream = function (originalStream) {\n if (_this.traversedObjects.has(originalStream)) {\n return _this.traversedObjects.get(originalStream);\n }\n var clonedStream = originalStream.clone(_this.dest);\n _this.traversedObjects.set(originalStream, clonedStream);\n var entries = originalStream.dict.entries();\n for (var idx = 0, len = entries.length; idx < len; idx++) {\n var _a = entries[idx], key = _a[0], value = _a[1];\n clonedStream.dict.set(key, _this.copy(value));\n }\n return clonedStream;\n };\n this.copyPDFIndirectObject = function (ref) {\n var alreadyMapped = _this.traversedObjects.has(ref);\n if (!alreadyMapped) {\n var newRef = _this.dest.nextRef();\n _this.traversedObjects.set(ref, newRef);\n var dereferencedValue = _this.src.lookup(ref);\n if (dereferencedValue) {\n var cloned = _this.copy(dereferencedValue);\n _this.dest.assign(newRef, cloned);\n }\n }\n return _this.traversedObjects.get(ref);\n };\n this.src = src;\n this.dest = dest;\n }\n PDFObjectCopier.for = function (src, dest) {\n return new PDFObjectCopier(src, dest);\n };\n return PDFObjectCopier;\n}());\nexport default PDFObjectCopier;\n//# sourceMappingURL=PDFObjectCopier.js.map","import PDFRef from \"../objects/PDFRef\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { copyStringIntoBuffer, padStart } from \"../../utils\";\n/**\n * Entries should be added using the [[addEntry]] and [[addDeletedEntry]]\n * methods **in order of ascending object number**.\n */\nvar PDFCrossRefSection = /** @class */ (function () {\n function PDFCrossRefSection(firstEntry) {\n this.subsections = firstEntry ? [[firstEntry]] : [];\n this.chunkIdx = 0;\n this.chunkLength = firstEntry ? 
1 : 0;\n }\n PDFCrossRefSection.prototype.addEntry = function (ref, offset) {\n this.append({ ref: ref, offset: offset, deleted: false });\n };\n PDFCrossRefSection.prototype.addDeletedEntry = function (ref, nextFreeObjectNumber) {\n this.append({ ref: ref, offset: nextFreeObjectNumber, deleted: true });\n };\n PDFCrossRefSection.prototype.toString = function () {\n var section = \"xref\\n\";\n for (var rangeIdx = 0, rangeLen = this.subsections.length; rangeIdx < rangeLen; rangeIdx++) {\n var range = this.subsections[rangeIdx];\n section += range[0].ref.objectNumber + \" \" + range.length + \"\\n\";\n for (var entryIdx = 0, entryLen = range.length; entryIdx < entryLen; entryIdx++) {\n var entry = range[entryIdx];\n section += padStart(String(entry.offset), 10, '0');\n section += ' ';\n section += padStart(String(entry.ref.generationNumber), 5, '0');\n section += ' ';\n section += entry.deleted ? 'f' : 'n';\n section += ' \\n';\n }\n }\n return section;\n };\n PDFCrossRefSection.prototype.sizeInBytes = function () {\n var size = 5;\n for (var idx = 0, len = this.subsections.length; idx < len; idx++) {\n var subsection = this.subsections[idx];\n var subsectionLength = subsection.length;\n var firstEntry = subsection[0];\n size += 2;\n size += String(firstEntry.ref.objectNumber).length;\n size += String(subsectionLength).length;\n size += 20 * subsectionLength;\n }\n return size;\n };\n PDFCrossRefSection.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n buffer[offset++] = CharCodes.x;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.f;\n buffer[offset++] = CharCodes.Newline;\n offset += this.copySubsectionsIntoBuffer(this.subsections, buffer, offset);\n return offset - initialOffset;\n };\n PDFCrossRefSection.prototype.copySubsectionsIntoBuffer = function (subsections, buffer, offset) {\n var initialOffset = offset;\n var length = subsections.length;\n for (var idx = 0; idx < length; idx++) {\n var subsection = this.subsections[idx];\n var firstObjectNumber = String(subsection[0].ref.objectNumber);\n offset += copyStringIntoBuffer(firstObjectNumber, buffer, offset);\n buffer[offset++] = CharCodes.Space;\n var rangeLength = String(subsection.length);\n offset += copyStringIntoBuffer(rangeLength, buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n offset += this.copyEntriesIntoBuffer(subsection, buffer, offset);\n }\n return offset - initialOffset;\n };\n PDFCrossRefSection.prototype.copyEntriesIntoBuffer = function (entries, buffer, offset) {\n var length = entries.length;\n for (var idx = 0; idx < length; idx++) {\n var entry = entries[idx];\n var entryOffset = padStart(String(entry.offset), 10, '0');\n offset += copyStringIntoBuffer(entryOffset, buffer, offset);\n buffer[offset++] = CharCodes.Space;\n var entryGen = padStart(String(entry.ref.generationNumber), 5, '0');\n offset += copyStringIntoBuffer(entryGen, buffer, offset);\n buffer[offset++] = CharCodes.Space;\n buffer[offset++] = entry.deleted ? 
CharCodes.f : CharCodes.n;\n buffer[offset++] = CharCodes.Space;\n buffer[offset++] = CharCodes.Newline;\n }\n return 20 * length;\n };\n PDFCrossRefSection.prototype.append = function (currEntry) {\n if (this.chunkLength === 0) {\n this.subsections.push([currEntry]);\n this.chunkIdx = 0;\n this.chunkLength = 1;\n return;\n }\n var chunk = this.subsections[this.chunkIdx];\n var prevEntry = chunk[this.chunkLength - 1];\n if (currEntry.ref.objectNumber - prevEntry.ref.objectNumber > 1) {\n this.subsections.push([currEntry]);\n this.chunkIdx += 1;\n this.chunkLength = 1;\n }\n else {\n chunk.push(currEntry);\n this.chunkLength += 1;\n }\n };\n PDFCrossRefSection.create = function () {\n return new PDFCrossRefSection({\n ref: PDFRef.of(0, 65535),\n offset: 0,\n deleted: true,\n });\n };\n PDFCrossRefSection.createEmpty = function () { return new PDFCrossRefSection(); };\n return PDFCrossRefSection;\n}());\nexport default PDFCrossRefSection;\n//# sourceMappingURL=PDFCrossRefSection.js.map","import CharCodes from \"../syntax/CharCodes\";\nimport { copyStringIntoBuffer } from \"../../utils\";\nvar PDFTrailer = /** @class */ (function () {\n function PDFTrailer(lastXRefOffset) {\n this.lastXRefOffset = String(lastXRefOffset);\n }\n PDFTrailer.prototype.toString = function () {\n return \"startxref\\n\" + this.lastXRefOffset + \"\\n%%EOF\";\n };\n PDFTrailer.prototype.sizeInBytes = function () {\n return 16 + this.lastXRefOffset.length;\n };\n PDFTrailer.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n buffer[offset++] = CharCodes.s;\n buffer[offset++] = CharCodes.t;\n buffer[offset++] = CharCodes.a;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.t;\n buffer[offset++] = CharCodes.x;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.f;\n buffer[offset++] = CharCodes.Newline;\n offset += copyStringIntoBuffer(this.lastXRefOffset, buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.Percent;\n buffer[offset++] = CharCodes.Percent;\n buffer[offset++] = CharCodes.E;\n buffer[offset++] = CharCodes.O;\n buffer[offset++] = CharCodes.F;\n return offset - initialOffset;\n };\n PDFTrailer.forLastCrossRefSectionOffset = function (offset) {\n return new PDFTrailer(offset);\n };\n return PDFTrailer;\n}());\nexport default PDFTrailer;\n//# sourceMappingURL=PDFTrailer.js.map","import CharCodes from \"../syntax/CharCodes\";\nvar PDFTrailerDict = /** @class */ (function () {\n function PDFTrailerDict(dict) {\n this.dict = dict;\n }\n PDFTrailerDict.prototype.toString = function () {\n return \"trailer\\n\" + this.dict.toString();\n };\n PDFTrailerDict.prototype.sizeInBytes = function () {\n return 8 + this.dict.sizeInBytes();\n };\n PDFTrailerDict.prototype.copyBytesInto = function (buffer, offset) {\n var initialOffset = offset;\n buffer[offset++] = CharCodes.t;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.a;\n buffer[offset++] = CharCodes.i;\n buffer[offset++] = CharCodes.l;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.r;\n buffer[offset++] = CharCodes.Newline;\n offset += this.dict.copyBytesInto(buffer, offset);\n return offset - initialOffset;\n };\n PDFTrailerDict.of = function (dict) { return new PDFTrailerDict(dict); };\n return PDFTrailerDict;\n}());\nexport default PDFTrailerDict;\n//# sourceMappingURL=PDFTrailerDict.js.map","import { __extends } from \"tslib\";\nimport PDFName from \"../objects/PDFName\";\nimport PDFNumber from 
\"../objects/PDFNumber\";\nimport PDFFlateStream from \"./PDFFlateStream\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { copyStringIntoBuffer, last } from \"../../utils\";\nvar PDFObjectStream = /** @class */ (function (_super) {\n __extends(PDFObjectStream, _super);\n function PDFObjectStream(context, objects, encode) {\n if (encode === void 0) { encode = true; }\n var _this = _super.call(this, context.obj({}), encode) || this;\n _this.objects = objects;\n _this.offsets = _this.computeObjectOffsets();\n _this.offsetsString = _this.computeOffsetsString();\n _this.dict.set(PDFName.of('Type'), PDFName.of('ObjStm'));\n _this.dict.set(PDFName.of('N'), PDFNumber.of(_this.objects.length));\n _this.dict.set(PDFName.of('First'), PDFNumber.of(_this.offsetsString.length));\n return _this;\n }\n PDFObjectStream.prototype.getObjectsCount = function () {\n return this.objects.length;\n };\n PDFObjectStream.prototype.clone = function (context) {\n return PDFObjectStream.withContextAndObjects(context || this.dict.context, this.objects.slice(), this.encode);\n };\n PDFObjectStream.prototype.getContentsString = function () {\n var value = this.offsetsString;\n for (var idx = 0, len = this.objects.length; idx < len; idx++) {\n var _a = this.objects[idx], object = _a[1];\n value += object + \"\\n\";\n }\n return value;\n };\n PDFObjectStream.prototype.getUnencodedContents = function () {\n var buffer = new Uint8Array(this.getUnencodedContentsSize());\n var offset = copyStringIntoBuffer(this.offsetsString, buffer, 0);\n for (var idx = 0, len = this.objects.length; idx < len; idx++) {\n var _a = this.objects[idx], object = _a[1];\n offset += object.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n }\n return buffer;\n };\n PDFObjectStream.prototype.getUnencodedContentsSize = function () {\n return (this.offsetsString.length +\n last(this.offsets)[1] +\n last(this.objects)[1].sizeInBytes() +\n 1);\n };\n PDFObjectStream.prototype.computeOffsetsString = function () {\n var offsetsString = '';\n for (var idx = 0, len = this.offsets.length; idx < len; idx++) {\n var _a = this.offsets[idx], objectNumber = _a[0], offset = _a[1];\n offsetsString += objectNumber + \" \" + offset + \" \";\n }\n return offsetsString;\n };\n PDFObjectStream.prototype.computeObjectOffsets = function () {\n var offset = 0;\n var offsets = new Array(this.objects.length);\n for (var idx = 0, len = this.objects.length; idx < len; idx++) {\n var _a = this.objects[idx], ref = _a[0], object = _a[1];\n offsets[idx] = [ref.objectNumber, offset];\n offset += object.sizeInBytes() + 1; // '\\n'\n }\n return offsets;\n };\n PDFObjectStream.withContextAndObjects = function (context, objects, encode) {\n if (encode === void 0) { encode = true; }\n return new PDFObjectStream(context, objects, encode);\n };\n return PDFObjectStream;\n}(PDFFlateStream));\nexport default PDFObjectStream;\n//# sourceMappingURL=PDFObjectStream.js.map","import { __awaiter, __generator } from \"tslib\";\nimport PDFCrossRefSection from \"../document/PDFCrossRefSection\";\nimport PDFHeader from \"../document/PDFHeader\";\nimport PDFTrailer from \"../document/PDFTrailer\";\nimport PDFTrailerDict from \"../document/PDFTrailerDict\";\nimport PDFObjectStream from \"../structures/PDFObjectStream\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { copyStringIntoBuffer, waitForTick } from \"../../utils\";\nvar PDFWriter = /** @class */ (function () {\n function PDFWriter(context, objectsPerTick) {\n var _this = this;\n this.parsedObjects = 
0;\n this.shouldWaitForTick = function (n) {\n _this.parsedObjects += n;\n return _this.parsedObjects % _this.objectsPerTick === 0;\n };\n this.context = context;\n this.objectsPerTick = objectsPerTick;\n }\n PDFWriter.prototype.serializeToBuffer = function () {\n return __awaiter(this, void 0, void 0, function () {\n var _a, size, header, indirectObjects, xref, trailerDict, trailer, offset, buffer, idx, len, _b, ref, object, objectNumber, generationNumber, n;\n return __generator(this, function (_c) {\n switch (_c.label) {\n case 0: return [4 /*yield*/, this.computeBufferSize()];\n case 1:\n _a = _c.sent(), size = _a.size, header = _a.header, indirectObjects = _a.indirectObjects, xref = _a.xref, trailerDict = _a.trailerDict, trailer = _a.trailer;\n offset = 0;\n buffer = new Uint8Array(size);\n offset += header.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.Newline;\n idx = 0, len = indirectObjects.length;\n _c.label = 2;\n case 2:\n if (!(idx < len)) return [3 /*break*/, 5];\n _b = indirectObjects[idx], ref = _b[0], object = _b[1];\n objectNumber = String(ref.objectNumber);\n offset += copyStringIntoBuffer(objectNumber, buffer, offset);\n buffer[offset++] = CharCodes.Space;\n generationNumber = String(ref.generationNumber);\n offset += copyStringIntoBuffer(generationNumber, buffer, offset);\n buffer[offset++] = CharCodes.Space;\n buffer[offset++] = CharCodes.o;\n buffer[offset++] = CharCodes.b;\n buffer[offset++] = CharCodes.j;\n buffer[offset++] = CharCodes.Newline;\n offset += object.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.e;\n buffer[offset++] = CharCodes.n;\n buffer[offset++] = CharCodes.d;\n buffer[offset++] = CharCodes.o;\n buffer[offset++] = CharCodes.b;\n buffer[offset++] = CharCodes.j;\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.Newline;\n n = object instanceof PDFObjectStream ? 
object.getObjectsCount() : 1;\n if (!this.shouldWaitForTick(n)) return [3 /*break*/, 4];\n return [4 /*yield*/, waitForTick()];\n case 3:\n _c.sent();\n _c.label = 4;\n case 4:\n idx++;\n return [3 /*break*/, 2];\n case 5:\n if (xref) {\n offset += xref.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n }\n if (trailerDict) {\n offset += trailerDict.copyBytesInto(buffer, offset);\n buffer[offset++] = CharCodes.Newline;\n buffer[offset++] = CharCodes.Newline;\n }\n offset += trailer.copyBytesInto(buffer, offset);\n return [2 /*return*/, buffer];\n }\n });\n });\n };\n PDFWriter.prototype.computeIndirectObjectSize = function (_a) {\n var ref = _a[0], object = _a[1];\n var refSize = ref.sizeInBytes() + 3; // 'R' -> 'obj\\n'\n var objectSize = object.sizeInBytes() + 9; // '\\nendobj\\n\\n'\n return refSize + objectSize;\n };\n PDFWriter.prototype.createTrailerDict = function () {\n return this.context.obj({\n Size: this.context.largestObjectNumber + 1,\n Root: this.context.trailerInfo.Root,\n Encrypt: this.context.trailerInfo.Encrypt,\n Info: this.context.trailerInfo.Info,\n ID: this.context.trailerInfo.ID,\n });\n };\n PDFWriter.prototype.computeBufferSize = function () {\n return __awaiter(this, void 0, void 0, function () {\n var header, size, xref, indirectObjects, idx, len, indirectObject, ref, xrefOffset, trailerDict, trailer;\n return __generator(this, function (_a) {\n switch (_a.label) {\n case 0:\n header = PDFHeader.forVersion(1, 7);\n size = header.sizeInBytes() + 2;\n xref = PDFCrossRefSection.create();\n indirectObjects = this.context.enumerateIndirectObjects();\n idx = 0, len = indirectObjects.length;\n _a.label = 1;\n case 1:\n if (!(idx < len)) return [3 /*break*/, 4];\n indirectObject = indirectObjects[idx];\n ref = indirectObject[0];\n xref.addEntry(ref, size);\n size += this.computeIndirectObjectSize(indirectObject);\n if (!this.shouldWaitForTick(1)) return [3 /*break*/, 3];\n return [4 /*yield*/, waitForTick()];\n case 2:\n _a.sent();\n _a.label = 3;\n case 3:\n idx++;\n return [3 /*break*/, 1];\n case 4:\n xrefOffset = size;\n size += xref.sizeInBytes() + 1; // '\\n'\n trailerDict = PDFTrailerDict.of(this.createTrailerDict());\n size += trailerDict.sizeInBytes() + 2; // '\\n\\n'\n trailer = PDFTrailer.forLastCrossRefSectionOffset(xrefOffset);\n size += trailer.sizeInBytes();\n return [2 /*return*/, { size: size, header: header, indirectObjects: indirectObjects, xref: xref, trailerDict: trailerDict, trailer: trailer }];\n }\n });\n });\n };\n PDFWriter.forContext = function (context, objectsPerTick) {\n return new PDFWriter(context, objectsPerTick);\n };\n return PDFWriter;\n}());\nexport default PDFWriter;\n//# sourceMappingURL=PDFWriter.js.map","import { __extends } from \"tslib\";\nimport PDFObject from \"./PDFObject\";\nvar PDFInvalidObject = /** @class */ (function (_super) {\n __extends(PDFInvalidObject, _super);\n function PDFInvalidObject(data) {\n var _this = _super.call(this) || this;\n _this.data = data;\n return _this;\n }\n PDFInvalidObject.prototype.clone = function () {\n return PDFInvalidObject.of(this.data.slice());\n };\n PDFInvalidObject.prototype.toString = function () {\n return \"PDFInvalidObject(\" + this.data.length + \" bytes)\";\n };\n PDFInvalidObject.prototype.sizeInBytes = function () {\n return this.data.length;\n };\n PDFInvalidObject.prototype.copyBytesInto = function (buffer, offset) {\n var length = this.data.length;\n for (var idx = 0; idx < length; idx++) {\n buffer[offset++] = this.data[idx];\n }\n return length;\n 
};\n PDFInvalidObject.of = function (data) { return new PDFInvalidObject(data); };\n return PDFInvalidObject;\n}(PDFObject));\nexport default PDFInvalidObject;\n//# sourceMappingURL=PDFInvalidObject.js.map","import { __awaiter, __generator } from \"tslib\";\nimport PDFString from \"../objects/PDFString\";\nimport PDFHexString from \"../objects/PDFHexString\";\n/**\n * From the PDF-A3 specification, section **3.1. Requirements - General**.\n * See:\n * * https://www.pdfa.org/wp-content/uploads/2018/10/PDF20_AN002-AF.pdf\n */\nexport var AFRelationship;\n(function (AFRelationship) {\n AFRelationship[\"Source\"] = \"Source\";\n AFRelationship[\"Data\"] = \"Data\";\n AFRelationship[\"Alternative\"] = \"Alternative\";\n AFRelationship[\"Supplement\"] = \"Supplement\";\n AFRelationship[\"EncryptedPayload\"] = \"EncryptedPayload\";\n AFRelationship[\"FormData\"] = \"EncryptedPayload\";\n AFRelationship[\"Schema\"] = \"Schema\";\n AFRelationship[\"Unspecified\"] = \"Unspecified\";\n})(AFRelationship || (AFRelationship = {}));\nvar FileEmbedder = /** @class */ (function () {\n function FileEmbedder(fileData, fileName, options) {\n if (options === void 0) { options = {}; }\n this.fileData = fileData;\n this.fileName = fileName;\n this.options = options;\n }\n FileEmbedder.for = function (bytes, fileName, options) {\n if (options === void 0) { options = {}; }\n return new FileEmbedder(bytes, fileName, options);\n };\n FileEmbedder.prototype.embedIntoContext = function (context, ref) {\n return __awaiter(this, void 0, void 0, function () {\n var _a, mimeType, description, creationDate, modificationDate, afRelationship, embeddedFileStream, embeddedFileStreamRef, fileSpecDict;\n return __generator(this, function (_b) {\n _a = this.options, mimeType = _a.mimeType, description = _a.description, creationDate = _a.creationDate, modificationDate = _a.modificationDate, afRelationship = _a.afRelationship;\n embeddedFileStream = context.flateStream(this.fileData, {\n Type: 'EmbeddedFile',\n Subtype: mimeType !== null && mimeType !== void 0 ? mimeType : undefined,\n Params: {\n Size: this.fileData.length,\n CreationDate: creationDate\n ? PDFString.fromDate(creationDate)\n : undefined,\n ModDate: modificationDate\n ? PDFString.fromDate(modificationDate)\n : undefined,\n },\n });\n embeddedFileStreamRef = context.register(embeddedFileStream);\n fileSpecDict = context.obj({\n Type: 'Filespec',\n F: PDFString.of(this.fileName),\n UF: PDFHexString.fromText(this.fileName),\n EF: { F: embeddedFileStreamRef },\n Desc: description ? PDFHexString.fromText(description) : undefined,\n AFRelationship: afRelationship !== null && afRelationship !== void 0 ? 
afRelationship : undefined,\n });\n if (ref) {\n context.assign(ref, fileSpecDict);\n return [2 /*return*/, ref];\n }\n else {\n return [2 /*return*/, context.register(fileSpecDict)];\n }\n return [2 /*return*/];\n });\n });\n };\n return FileEmbedder;\n}());\nexport default FileEmbedder;\n//# sourceMappingURL=FileEmbedder.js.map","import { __awaiter, __extends, __generator } from \"tslib\";\nimport PDFHeader from \"../document/PDFHeader\";\nimport PDFTrailer from \"../document/PDFTrailer\";\nimport PDFInvalidObject from \"../objects/PDFInvalidObject\";\nimport PDFName from \"../objects/PDFName\";\nimport PDFNumber from \"../objects/PDFNumber\";\nimport PDFRef from \"../objects/PDFRef\";\nimport PDFStream from \"../objects/PDFStream\";\nimport PDFCrossRefStream from \"../structures/PDFCrossRefStream\";\nimport PDFObjectStream from \"../structures/PDFObjectStream\";\nimport PDFWriter from \"./PDFWriter\";\nimport { last, waitForTick } from \"../../utils\";\nvar PDFStreamWriter = /** @class */ (function (_super) {\n __extends(PDFStreamWriter, _super);\n function PDFStreamWriter(context, objectsPerTick, encodeStreams, objectsPerStream) {\n var _this = _super.call(this, context, objectsPerTick) || this;\n _this.encodeStreams = encodeStreams;\n _this.objectsPerStream = objectsPerStream;\n return _this;\n }\n PDFStreamWriter.prototype.computeBufferSize = function () {\n return __awaiter(this, void 0, void 0, function () {\n var objectNumber, header, size, xrefStream, uncompressedObjects, compressedObjects, objectStreamRefs, indirectObjects, idx, len, indirectObject, ref, object, shouldNotCompress, chunk, objectStreamRef, idx, len, chunk, ref, objectStream, xrefStreamRef, xrefOffset, trailer;\n return __generator(this, function (_a) {\n switch (_a.label) {\n case 0:\n objectNumber = this.context.largestObjectNumber + 1;\n header = PDFHeader.forVersion(1, 7);\n size = header.sizeInBytes() + 2;\n xrefStream = PDFCrossRefStream.create(this.createTrailerDict(), this.encodeStreams);\n uncompressedObjects = [];\n compressedObjects = [];\n objectStreamRefs = [];\n indirectObjects = this.context.enumerateIndirectObjects();\n idx = 0, len = indirectObjects.length;\n _a.label = 1;\n case 1:\n if (!(idx < len)) return [3 /*break*/, 6];\n indirectObject = indirectObjects[idx];\n ref = indirectObject[0], object = indirectObject[1];\n shouldNotCompress = ref === this.context.trailerInfo.Encrypt ||\n object instanceof PDFStream ||\n object instanceof PDFInvalidObject ||\n ref.generationNumber !== 0;\n if (!shouldNotCompress) return [3 /*break*/, 4];\n uncompressedObjects.push(indirectObject);\n xrefStream.addUncompressedEntry(ref, size);\n size += this.computeIndirectObjectSize(indirectObject);\n if (!this.shouldWaitForTick(1)) return [3 /*break*/, 3];\n return [4 /*yield*/, waitForTick()];\n case 2:\n _a.sent();\n _a.label = 3;\n case 3: return [3 /*break*/, 5];\n case 4:\n chunk = last(compressedObjects);\n objectStreamRef = last(objectStreamRefs);\n if (!chunk || chunk.length % this.objectsPerStream === 0) {\n chunk = [];\n compressedObjects.push(chunk);\n objectStreamRef = PDFRef.of(objectNumber++);\n objectStreamRefs.push(objectStreamRef);\n }\n xrefStream.addCompressedEntry(ref, objectStreamRef, chunk.length);\n chunk.push(indirectObject);\n _a.label = 5;\n case 5:\n idx++;\n return [3 /*break*/, 1];\n case 6:\n idx = 0, len = compressedObjects.length;\n _a.label = 7;\n case 7:\n if (!(idx < len)) return [3 /*break*/, 10];\n chunk = compressedObjects[idx];\n ref = objectStreamRefs[idx];\n objectStream = 
PDFObjectStream.withContextAndObjects(this.context, chunk, this.encodeStreams);\n xrefStream.addUncompressedEntry(ref, size);\n size += this.computeIndirectObjectSize([ref, objectStream]);\n uncompressedObjects.push([ref, objectStream]);\n if (!this.shouldWaitForTick(chunk.length)) return [3 /*break*/, 9];\n return [4 /*yield*/, waitForTick()];\n case 8:\n _a.sent();\n _a.label = 9;\n case 9:\n idx++;\n return [3 /*break*/, 7];\n case 10:\n xrefStreamRef = PDFRef.of(objectNumber++);\n xrefStream.dict.set(PDFName.of('Size'), PDFNumber.of(objectNumber));\n xrefStream.addUncompressedEntry(xrefStreamRef, size);\n xrefOffset = size;\n size += this.computeIndirectObjectSize([xrefStreamRef, xrefStream]);\n uncompressedObjects.push([xrefStreamRef, xrefStream]);\n trailer = PDFTrailer.forLastCrossRefSectionOffset(xrefOffset);\n size += trailer.sizeInBytes();\n return [2 /*return*/, { size: size, header: header, indirectObjects: uncompressedObjects, trailer: trailer }];\n }\n });\n });\n };\n PDFStreamWriter.forContext = function (context, objectsPerTick, encodeStreams, objectsPerStream) {\n if (encodeStreams === void 0) { encodeStreams = true; }\n if (objectsPerStream === void 0) { objectsPerStream = 50; }\n return new PDFStreamWriter(context, objectsPerTick, encodeStreams, objectsPerStream);\n };\n return PDFStreamWriter;\n}(PDFWriter));\nexport default PDFStreamWriter;\n//# sourceMappingURL=PDFStreamWriter.js.map","import { __extends } from \"tslib\";\nimport PDFObject from \"./PDFObject\";\nimport CharCodes from \"../syntax/CharCodes\";\nimport { copyStringIntoBuffer, toHexStringOfMinLength, utf16Decode, utf16Encode, pdfDocEncodingDecode, parseDate, hasUtf16BOM, } from \"../../utils\";\nimport { InvalidPDFDateStringError } from \"../errors\";\nvar PDFHexString = /** @class */ (function (_super) {\n __extends(PDFHexString, _super);\n function PDFHexString(value) {\n var _this = _super.call(this) || this;\n _this.value = value;\n return _this;\n }\n PDFHexString.prototype.asBytes = function () {\n // Append a zero if the number of digits is odd. See PDF spec 7.3.4.3\n var hex = this.value + (this.value.length % 2 === 1 ? 
'0' : '');\n var hexLength = hex.length;\n var bytes = new Uint8Array(hex.length / 2);\n var hexOffset = 0;\n var bytesOffset = 0;\n // Interpret each pair of hex digits as a single byte\n while (hexOffset < hexLength) {\n var byte = parseInt(hex.substring(hexOffset, hexOffset + 2), 16);\n bytes[bytesOffset] = byte;\n hexOffset += 2;\n bytesOffset += 1;\n }\n return bytes;\n };\n PDFHexString.prototype.decodeText = function () {\n var bytes = this.asBytes();\n if (hasUtf16BOM(bytes))\n return utf16Decode(bytes);\n return pdfDocEncodingDecode(bytes);\n };\n PDFHexString.prototype.decodeDate = function () {\n var text = this.decodeText();\n var date = parseDate(text);\n if (!date)\n throw new InvalidPDFDateStringError(text);\n return date;\n };\n PDFHexString.prototype.asString = function () {\n return this.value;\n };\n PDFHexString.prototype.clone = function () {\n return PDFHexString.of(this.value);\n };\n PDFHexString.prototype.toString = function () {\n return \"<\" + this.value + \">\";\n };\n PDFHexString.prototype.sizeInBytes = function () {\n return this.value.length + 2;\n };\n PDFHexString.prototype.copyBytesInto = function (buffer, offset) {\n buffer[offset++] = CharCodes.LessThan;\n offset += copyStringIntoBuffer(this.value, buffer, offset);\n buffer[offset++] = CharCodes.GreaterThan;\n return this.value.length + 2;\n };\n PDFHexString.of = function (value) { return new PDFHexString(value); };\n PDFHexString.fromText = function (value) {\n var encoded = utf16Encode(value);\n var hex = '';\n for (var idx = 0, len = encoded.length; idx < len; idx++) {\n hex += toHexStringOfMinLength(encoded[idx], 4);\n }\n return new PDFHexString(hex);\n };\n return PDFHexString;\n}(PDFObject));\nexport default PDFHexString;\n//# sourceMappingURL=PDFHexString.js.map","import { Encodings, Font, FontNames, } from '@pdf-lib/standard-fonts';\nimport PDFHexString from \"../objects/PDFHexString\";\nimport { toCodePoint, toHexString } from \"../../utils\";\n/**\n * A note of thanks to the developers of https://github.com/foliojs/pdfkit, as\n * this class borrows from:\n * https://github.com/foliojs/pdfkit/blob/f91bdd61c164a72ea06be1a43dc0a412afc3925f/lib/font/afm.coffee\n */\nvar StandardFontEmbedder = /** @class */ (function () {\n function StandardFontEmbedder(fontName, customName) {\n // prettier-ignore\n this.encoding = (fontName === FontNames.ZapfDingbats ? Encodings.ZapfDingbats\n : fontName === FontNames.Symbol ? Encodings.Symbol\n : Encodings.WinAnsi);\n this.font = Font.load(fontName);\n this.fontName = this.font.FontName;\n this.customName = customName;\n }\n /**\n * Encode the JavaScript string into this font. 
(JavaScript encodes strings in\n * Unicode, but standard fonts use either WinAnsi, ZapfDingbats, or Symbol\n * encodings)\n */\n StandardFontEmbedder.prototype.encodeText = function (text) {\n var glyphs = this.encodeTextAsGlyphs(text);\n var hexCodes = new Array(glyphs.length);\n for (var idx = 0, len = glyphs.length; idx < len; idx++) {\n hexCodes[idx] = toHexString(glyphs[idx].code);\n }\n return PDFHexString.of(hexCodes.join(''));\n };\n StandardFontEmbedder.prototype.widthOfTextAtSize = function (text, size) {\n var glyphs = this.encodeTextAsGlyphs(text);\n var totalWidth = 0;\n for (var idx = 0, len = glyphs.length; idx < len; idx++) {\n var left = glyphs[idx].name;\n var right = (glyphs[idx + 1] || {}).name;\n var kernAmount = this.font.getXAxisKerningForPair(left, right) || 0;\n totalWidth += this.widthOfGlyph(left) + kernAmount;\n }\n var scale = size / 1000;\n return totalWidth * scale;\n };\n StandardFontEmbedder.prototype.heightOfFontAtSize = function (size, options) {\n if (options === void 0) { options = {}; }\n var _a = options.descender, descender = _a === void 0 ? true : _a;\n var _b = this.font, Ascender = _b.Ascender, Descender = _b.Descender, FontBBox = _b.FontBBox;\n var yTop = Ascender || FontBBox[3];\n var yBottom = Descender || FontBBox[1];\n var height = yTop - yBottom;\n if (!descender)\n height += Descender || 0;\n return (height / 1000) * size;\n };\n StandardFontEmbedder.prototype.sizeOfFontAtHeight = function (height) {\n var _a = this.font, Ascender = _a.Ascender, Descender = _a.Descender, FontBBox = _a.FontBBox;\n var yTop = Ascender || FontBBox[3];\n var yBottom = Descender || FontBBox[1];\n return (1000 * height) / (yTop - yBottom);\n };\n StandardFontEmbedder.prototype.embedIntoContext = function (context, ref) {\n var fontDict = context.obj({\n Type: 'Font',\n Subtype: 'Type1',\n BaseFont: this.customName || this.fontName,\n Encoding: this.encoding === Encodings.WinAnsi ? 
'WinAnsiEncoding' : undefined,\n });\n if (ref) {\n context.assign(ref, fontDict);\n return ref;\n }\n else {\n return context.register(fontDict);\n }\n };\n StandardFontEmbedder.prototype.widthOfGlyph = function (glyphName) {\n // Default to 250 if font doesn't specify a width\n return this.font.getWidthOfGlyph(glyphName) || 250;\n };\n StandardFontEmbedder.prototype.encodeTextAsGlyphs = function (text) {\n var codePoints = Array.from(text);\n var glyphs = new Array(codePoints.length);\n for (var idx = 0, len = codePoints.length; idx < len; idx++) {\n var codePoint = toCodePoint(codePoints[idx]);\n glyphs[idx] = this.encoding.encodeUnicodeCodePoint(codePoint);\n }\n return glyphs;\n };\n StandardFontEmbedder.for = function (fontName, customName) {\n return new StandardFontEmbedder(fontName, customName);\n };\n return StandardFontEmbedder;\n}());\nexport default StandardFontEmbedder;\n//# sourceMappingURL=StandardFontEmbedder.js.map","import { toHexString, toHexStringOfMinLength } from \"../../utils\";\nimport { hasSurrogates, highSurrogate, isWithinBMP, lowSurrogate, } from \"../../utils/unicode\";\n/** `glyphs` should be an array of unique glyphs */\nexport var createCmap = function (glyphs, glyphId) {\n var bfChars = new Array(glyphs.length);\n for (var idx = 0, len = glyphs.length; idx < len; idx++) {\n var glyph = glyphs[idx];\n var id = cmapHexFormat(cmapHexString(glyphId(glyph)));\n var unicode = cmapHexFormat.apply(void 0, glyph.codePoints.map(cmapCodePointFormat));\n bfChars[idx] = [id, unicode];\n }\n return fillCmapTemplate(bfChars);\n};\n/* =============================== Templates ================================ */\nvar fillCmapTemplate = function (bfChars) { return \"/CIDInit /ProcSet findresource begin\\n12 dict begin\\nbegincmap\\n/CIDSystemInfo <<\\n /Registry (Adobe)\\n /Ordering (UCS)\\n /Supplement 0\\n>> def\\n/CMapName /Adobe-Identity-UCS def\\n/CMapType 2 def\\n1 begincodespacerange\\n<0000>