{ "version": 3, "sources": ["../../../node_modules/@rails/actioncable/src/adapters.js", "../../../node_modules/@rails/actioncable/src/logger.js", "../../../node_modules/@rails/actioncable/src/connection_monitor.js", "../../../node_modules/@rails/actioncable/src/internal.js", "../../../node_modules/@rails/actioncable/src/connection.js", "../../../node_modules/@rails/actioncable/src/subscription.js", "../../../node_modules/@rails/actioncable/src/subscription_guarantor.js", "../../../node_modules/@rails/actioncable/src/subscriptions.js", "../../../node_modules/@rails/actioncable/src/consumer.js", "../../../node_modules/@rails/actioncable/src/index.js", "../../../node_modules/global/window.js", "../../../node_modules/global/document.js", "../../../node_modules/safe-json-parse/tuple.js", "../../../node_modules/@babel/runtime/helpers/extends.js", "../../../node_modules/is-function/index.js", "../../../node_modules/@videojs/xhr/lib/interceptors.js", "../../../node_modules/@videojs/xhr/lib/retry.js", "../../../node_modules/@videojs/xhr/lib/http-handler.js", "../../../node_modules/@videojs/xhr/lib/index.js", "../../../node_modules/videojs-vtt.js/lib/vtt.js", "../../../node_modules/videojs-vtt.js/lib/vttcue.js", "../../../node_modules/videojs-vtt.js/lib/vttregion.js", "../../../node_modules/videojs-vtt.js/lib/browser-index.js", "../../../node_modules/url-toolkit/src/url-toolkit.js", "../../../node_modules/@xmldom/xmldom/lib/conventions.js", "../../../node_modules/@xmldom/xmldom/lib/dom.js", "../../../node_modules/@xmldom/xmldom/lib/entities.js", "../../../node_modules/@xmldom/xmldom/lib/sax.js", "../../../node_modules/@xmldom/xmldom/lib/dom-parser.js", "../../../node_modules/@xmldom/xmldom/lib/index.js", "../../../node_modules/mux.js/lib/utils/numbers.js", "../../../node_modules/mux.js/lib/tools/parse-sidx.js", "../../../node_modules/mux.js/lib/utils/clock.js", "../../../node_modules/@hotwired/turbo/dist/turbo.es2017-esm.js", "../../../node_modules/@hotwired/turbo-rails/app/javascript/turbo/cable.js", "../../../node_modules/@hotwired/turbo-rails/app/javascript/turbo/snakeize.js", "../../../node_modules/@hotwired/turbo-rails/app/javascript/turbo/cable_stream_source_element.js", "../../../node_modules/@hotwired/turbo-rails/app/javascript/turbo/fetch_requests.js", "../../../node_modules/@hotwired/turbo-rails/app/javascript/turbo/index.js", "../../../node_modules/@hotwired/stimulus/dist/stimulus.js", "../../javascript/legacy_controllers/application.js", "../../javascript/lib/environment.js", "../../javascript/legacy_controllers/authentications_controller.js", "../../javascript/legacy_controllers/collapsible_text_controller.js", "../../javascript/legacy_controllers/contacts_controller.js", "../../../node_modules/swiper/shared/ssr-window.esm.mjs", "../../../node_modules/swiper/shared/utils.mjs", "../../../node_modules/swiper/shared/swiper-core.mjs", "../../../node_modules/swiper/shared/create-element-if-not-defined.mjs", "../../../node_modules/swiper/modules/navigation.mjs", "../../../node_modules/swiper/modules/autoplay.mjs", "../../../node_modules/swiper/modules/free-mode.mjs", "../../javascript/legacy_controllers/content_nav_tabs_controller.js", "../../javascript/legacy_controllers/course_cards_controller.js", "../../javascript/legacy_controllers/flashes_controller.js", "../../javascript/legacy_controllers/language_controller.js", "../../javascript/legacy_controllers/link_controller.js", "../../javascript/legacy_controllers/list_5_controller.js", "../../../node_modules/video.js/dist/video.es.js", 
"../../../node_modules/@babel/runtime/helpers/esm/extends.js", "../../../node_modules/@videojs/vhs-utils/es/resolve-url.js", "../../../node_modules/m3u8-parser/node_modules/@videojs/vhs-utils/es/stream.js", "../../../node_modules/m3u8-parser/node_modules/@videojs/vhs-utils/es/decode-b64-to-uint8-array.js", "../../../node_modules/m3u8-parser/dist/m3u8-parser.es.js", "../../../node_modules/@videojs/vhs-utils/es/codecs.js", "../../../node_modules/@videojs/vhs-utils/es/media-types.js", "../../../node_modules/@videojs/vhs-utils/es/byte-helpers.js", "../../../node_modules/mpd-parser/dist/mpd-parser.es.js", "../../../node_modules/@videojs/vhs-utils/es/media-groups.js", "../../../node_modules/@videojs/vhs-utils/es/decode-b64-to-uint8-array.js", "../../../node_modules/@videojs/vhs-utils/es/id3-helpers.js", "../../../node_modules/@videojs/vhs-utils/es/opus-helpers.js", "../../../node_modules/@videojs/vhs-utils/es/mp4-helpers.js", "../../../node_modules/@videojs/vhs-utils/es/ebml-helpers.js", "../../../node_modules/@videojs/vhs-utils/es/nal-helpers.js", "../../../node_modules/@videojs/vhs-utils/es/containers.js", "../../javascript/legacy_controllers/media_player_controller.js", "../../javascript/legacy_controllers/modal_controller.js", "../../javascript/legacy_controllers/off_canvas_controller.js", "../../javascript/legacy_controllers/slider_1_controller.js", "../../../node_modules/@stripe/stripe-js/dist/index.mjs", "../../javascript/controllers/stripe_controller.js", "../../../node_modules/@floating-ui/utils/dist/floating-ui.utils.mjs", "../../../node_modules/@floating-ui/core/dist/floating-ui.core.mjs", "../../../node_modules/@floating-ui/utils/dist/floating-ui.utils.dom.mjs", "../../../node_modules/@floating-ui/dom/dist/floating-ui.dom.mjs", "../../javascript/legacy_controllers/tooltip_controller.js", "../../javascript/legacy_controllers/top_bar_controller.js", "../../javascript/legacy_controllers/users_controller.js", "../../javascript/legacy_controllers/index.js", "../../javascript/lib/turbo_bridge.js", "../../javascript/application_legacy.js"], "sourcesContent": ["export default {\n logger: typeof console !== \"undefined\" ? console : undefined,\n WebSocket: typeof WebSocket !== \"undefined\" ? WebSocket : undefined,\n}\n", "import adapters from \"./adapters\"\n\n// The logger is disabled by default. You can enable it with:\n//\n// ActionCable.logger.enabled = true\n//\n// Example:\n//\n// import * as ActionCable from '@rails/actioncable'\n//\n// ActionCable.logger.enabled = true\n// ActionCable.logger.log('Connection Established.')\n//\n\nexport default {\n log(...messages) {\n if (this.enabled) {\n messages.push(Date.now())\n adapters.logger.log(\"[ActionCable]\", ...messages)\n }\n },\n}\n", "import logger from \"./logger\"\n\n// Responsible for ensuring the cable connection is in good health by validating the heartbeat pings sent from the server, and attempting\n// revival reconnections if things go astray. Internal class, not intended for direct user manipulation.\n\nconst now = () => new Date().getTime()\n\nconst secondsSince = time => (now() - time) / 1000\n\nclass ConnectionMonitor {\n constructor(connection) {\n this.visibilityDidChange = this.visibilityDidChange.bind(this)\n this.connection = connection\n this.reconnectAttempts = 0\n }\n\n start() {\n if (!this.isRunning()) {\n this.startedAt = now()\n delete this.stoppedAt\n this.startPolling()\n addEventListener(\"visibilitychange\", this.visibilityDidChange)\n logger.log(`ConnectionMonitor started. 
stale threshold = ${this.constructor.staleThreshold} s`)\n }\n }\n\n stop() {\n if (this.isRunning()) {\n this.stoppedAt = now()\n this.stopPolling()\n removeEventListener(\"visibilitychange\", this.visibilityDidChange)\n logger.log(\"ConnectionMonitor stopped\")\n }\n }\n\n isRunning() {\n return this.startedAt && !this.stoppedAt\n }\n\n recordPing() {\n this.pingedAt = now()\n }\n\n recordConnect() {\n this.reconnectAttempts = 0\n this.recordPing()\n delete this.disconnectedAt\n logger.log(\"ConnectionMonitor recorded connect\")\n }\n\n recordDisconnect() {\n this.disconnectedAt = now()\n logger.log(\"ConnectionMonitor recorded disconnect\")\n }\n\n // Private\n\n startPolling() {\n this.stopPolling()\n this.poll()\n }\n\n stopPolling() {\n clearTimeout(this.pollTimeout)\n }\n\n poll() {\n this.pollTimeout = setTimeout(() => {\n this.reconnectIfStale()\n this.poll()\n }\n , this.getPollInterval())\n }\n\n getPollInterval() {\n const { staleThreshold, reconnectionBackoffRate } = this.constructor\n const backoff = Math.pow(1 + reconnectionBackoffRate, Math.min(this.reconnectAttempts, 10))\n const jitterMax = this.reconnectAttempts === 0 ? 1.0 : reconnectionBackoffRate\n const jitter = jitterMax * Math.random()\n return staleThreshold * 1000 * backoff * (1 + jitter)\n }\n\n reconnectIfStale() {\n if (this.connectionIsStale()) {\n logger.log(`ConnectionMonitor detected stale connection. reconnectAttempts = ${this.reconnectAttempts}, time stale = ${secondsSince(this.refreshedAt)} s, stale threshold = ${this.constructor.staleThreshold} s`)\n this.reconnectAttempts++\n if (this.disconnectedRecently()) {\n logger.log(`ConnectionMonitor skipping reopening recent disconnect. time disconnected = ${secondsSince(this.disconnectedAt)} s`)\n } else {\n logger.log(\"ConnectionMonitor reopening\")\n this.connection.reopen()\n }\n }\n }\n\n get refreshedAt() {\n return this.pingedAt ? this.pingedAt : this.startedAt\n }\n\n connectionIsStale() {\n return secondsSince(this.refreshedAt) > this.constructor.staleThreshold\n }\n\n disconnectedRecently() {\n return this.disconnectedAt && (secondsSince(this.disconnectedAt) < this.constructor.staleThreshold)\n }\n\n visibilityDidChange() {\n if (document.visibilityState === \"visible\") {\n setTimeout(() => {\n if (this.connectionIsStale() || !this.connection.isOpen()) {\n logger.log(`ConnectionMonitor reopening stale connection on visibilitychange. visibilityState = ${document.visibilityState}`)\n this.connection.reopen()\n }\n }\n , 200)\n }\n }\n\n}\n\nConnectionMonitor.staleThreshold = 6 // Server::Connections::BEAT_INTERVAL * 2 (missed two pings)\nConnectionMonitor.reconnectionBackoffRate = 0.15\n\nexport default ConnectionMonitor\n", "export default {\n \"message_types\": {\n \"welcome\": \"welcome\",\n \"disconnect\": \"disconnect\",\n \"ping\": \"ping\",\n \"confirmation\": \"confirm_subscription\",\n \"rejection\": \"reject_subscription\"\n },\n \"disconnect_reasons\": {\n \"unauthorized\": \"unauthorized\",\n \"invalid_request\": \"invalid_request\",\n \"server_restart\": \"server_restart\",\n \"remote\": \"remote\"\n },\n \"default_mount_path\": \"/cable\",\n \"protocols\": [\n \"actioncable-v1-json\",\n \"actioncable-unsupported\"\n ]\n}\n", "import adapters from \"./adapters\"\nimport ConnectionMonitor from \"./connection_monitor\"\nimport INTERNAL from \"./internal\"\nimport logger from \"./logger\"\n\n// Encapsulate the cable connection held by the consumer. 
This is an internal class not intended for direct user manipulation.\n\nconst {message_types, protocols} = INTERNAL\nconst supportedProtocols = protocols.slice(0, protocols.length - 1)\n\nconst indexOf = [].indexOf\n\nclass Connection {\n constructor(consumer) {\n this.open = this.open.bind(this)\n this.consumer = consumer\n this.subscriptions = this.consumer.subscriptions\n this.monitor = new ConnectionMonitor(this)\n this.disconnected = true\n }\n\n send(data) {\n if (this.isOpen()) {\n this.webSocket.send(JSON.stringify(data))\n return true\n } else {\n return false\n }\n }\n\n open() {\n if (this.isActive()) {\n logger.log(`Attempted to open WebSocket, but existing socket is ${this.getState()}`)\n return false\n } else {\n const socketProtocols = [...protocols, ...this.consumer.subprotocols || []]\n logger.log(`Opening WebSocket, current state is ${this.getState()}, subprotocols: ${socketProtocols}`)\n if (this.webSocket) { this.uninstallEventHandlers() }\n this.webSocket = new adapters.WebSocket(this.consumer.url, socketProtocols)\n this.installEventHandlers()\n this.monitor.start()\n return true\n }\n }\n\n close({allowReconnect} = {allowReconnect: true}) {\n if (!allowReconnect) { this.monitor.stop() }\n // Avoid closing websockets in a \"connecting\" state due to Safari 15.1+ bug. See: https://github.com/rails/rails/issues/43835#issuecomment-1002288478\n if (this.isOpen()) {\n return this.webSocket.close()\n }\n }\n\n reopen() {\n logger.log(`Reopening WebSocket, current state is ${this.getState()}`)\n if (this.isActive()) {\n try {\n return this.close()\n } catch (error) {\n logger.log(\"Failed to reopen WebSocket\", error)\n }\n finally {\n logger.log(`Reopening WebSocket in ${this.constructor.reopenDelay}ms`)\n setTimeout(this.open, this.constructor.reopenDelay)\n }\n } else {\n return this.open()\n }\n }\n\n getProtocol() {\n if (this.webSocket) {\n return this.webSocket.protocol\n }\n }\n\n isOpen() {\n return this.isState(\"open\")\n }\n\n isActive() {\n return this.isState(\"open\", \"connecting\")\n }\n\n triedToReconnect() {\n return this.monitor.reconnectAttempts > 0\n }\n\n // Private\n\n isProtocolSupported() {\n return indexOf.call(supportedProtocols, this.getProtocol()) >= 0\n }\n\n isState(...states) {\n return indexOf.call(states, this.getState()) >= 0\n }\n\n getState() {\n if (this.webSocket) {\n for (let state in adapters.WebSocket) {\n if (adapters.WebSocket[state] === this.webSocket.readyState) {\n return state.toLowerCase()\n }\n }\n }\n return null\n }\n\n installEventHandlers() {\n for (let eventName in this.events) {\n const handler = this.events[eventName].bind(this)\n this.webSocket[`on${eventName}`] = handler\n }\n }\n\n uninstallEventHandlers() {\n for (let eventName in this.events) {\n this.webSocket[`on${eventName}`] = function() {}\n }\n }\n\n}\n\nConnection.reopenDelay = 500\n\nConnection.prototype.events = {\n message(event) {\n if (!this.isProtocolSupported()) { return }\n const {identifier, message, reason, reconnect, type} = JSON.parse(event.data)\n switch (type) {\n case message_types.welcome:\n if (this.triedToReconnect()) {\n this.reconnectAttempted = true\n }\n this.monitor.recordConnect()\n return this.subscriptions.reload()\n case message_types.disconnect:\n logger.log(`Disconnecting. 
Reason: ${reason}`)\n return this.close({allowReconnect: reconnect})\n case message_types.ping:\n return this.monitor.recordPing()\n case message_types.confirmation:\n this.subscriptions.confirmSubscription(identifier)\n if (this.reconnectAttempted) {\n this.reconnectAttempted = false\n return this.subscriptions.notify(identifier, \"connected\", {reconnected: true})\n } else {\n return this.subscriptions.notify(identifier, \"connected\", {reconnected: false})\n }\n case message_types.rejection:\n return this.subscriptions.reject(identifier)\n default:\n return this.subscriptions.notify(identifier, \"received\", message)\n }\n },\n\n open() {\n logger.log(`WebSocket onopen event, using '${this.getProtocol()}' subprotocol`)\n this.disconnected = false\n if (!this.isProtocolSupported()) {\n logger.log(\"Protocol is unsupported. Stopping monitor and disconnecting.\")\n return this.close({allowReconnect: false})\n }\n },\n\n close(event) {\n logger.log(\"WebSocket onclose event\")\n if (this.disconnected) { return }\n this.disconnected = true\n this.monitor.recordDisconnect()\n return this.subscriptions.notifyAll(\"disconnected\", {willAttemptReconnect: this.monitor.isRunning()})\n },\n\n error() {\n logger.log(\"WebSocket onerror event\")\n }\n}\n\nexport default Connection\n", "// A new subscription is created through the ActionCable.Subscriptions instance available on the consumer.\n// It provides a number of callbacks and a method for calling remote procedure calls on the corresponding\n// Channel instance on the server side.\n//\n// An example demonstrates the basic functionality:\n//\n// App.appearance = App.cable.subscriptions.create(\"AppearanceChannel\", {\n// connected() {\n// // Called once the subscription has been successfully completed\n// },\n//\n// disconnected({ willAttemptReconnect: boolean }) {\n// // Called when the client has disconnected with the server.\n// // The object will have an `willAttemptReconnect` property which\n// // says whether the client has the intention of attempting\n// // to reconnect.\n// },\n//\n// appear() {\n// this.perform('appear', {appearing_on: this.appearingOn()})\n// },\n//\n// away() {\n// this.perform('away')\n// },\n//\n// appearingOn() {\n// $('main').data('appearing-on')\n// }\n// })\n//\n// The methods #appear and #away forward their intent to the remote AppearanceChannel instance on the server\n// by calling the `perform` method with the first parameter being the action (which maps to AppearanceChannel#appear/away).\n// The second parameter is a hash that'll get JSON encoded and made available on the server in the data parameter.\n//\n// This is how the server component would look:\n//\n// class AppearanceChannel < ApplicationActionCable::Channel\n// def subscribed\n// current_user.appear\n// end\n//\n// def unsubscribed\n// current_user.disappear\n// end\n//\n// def appear(data)\n// current_user.appear on: data['appearing_on']\n// end\n//\n// def away\n// current_user.away\n// end\n// end\n//\n// The \"AppearanceChannel\" name is automatically mapped between the client-side subscription creation and the server-side Ruby class name.\n// The AppearanceChannel#appear/away public methods are exposed automatically to client-side invocation through the perform method.\n\nconst extend = function(object, properties) {\n if (properties != null) {\n for (let key in properties) {\n const value = properties[key]\n object[key] = value\n }\n }\n return object\n}\n\nexport default class Subscription {\n constructor(consumer, params = {}, mixin) {\n 
this.consumer = consumer\n this.identifier = JSON.stringify(params)\n extend(this, mixin)\n }\n\n // Perform a channel action with the optional data passed as an attribute\n perform(action, data = {}) {\n data.action = action\n return this.send(data)\n }\n\n send(data) {\n return this.consumer.send({command: \"message\", identifier: this.identifier, data: JSON.stringify(data)})\n }\n\n unsubscribe() {\n return this.consumer.subscriptions.remove(this)\n }\n}\n", "import logger from \"./logger\"\n\n// Responsible for ensuring channel subscribe command is confirmed, retrying until confirmation is received.\n// Internal class, not intended for direct user manipulation.\n\nclass SubscriptionGuarantor {\n constructor(subscriptions) {\n this.subscriptions = subscriptions\n this.pendingSubscriptions = []\n }\n\n guarantee(subscription) {\n if(this.pendingSubscriptions.indexOf(subscription) == -1){ \n logger.log(`SubscriptionGuarantor guaranteeing ${subscription.identifier}`)\n this.pendingSubscriptions.push(subscription) \n }\n else {\n logger.log(`SubscriptionGuarantor already guaranteeing ${subscription.identifier}`)\n }\n this.startGuaranteeing()\n }\n\n forget(subscription) {\n logger.log(`SubscriptionGuarantor forgetting ${subscription.identifier}`)\n this.pendingSubscriptions = (this.pendingSubscriptions.filter((s) => s !== subscription))\n }\n\n startGuaranteeing() {\n this.stopGuaranteeing()\n this.retrySubscribing()\n }\n \n stopGuaranteeing() {\n clearTimeout(this.retryTimeout)\n }\n\n retrySubscribing() {\n this.retryTimeout = setTimeout(() => {\n if (this.subscriptions && typeof(this.subscriptions.subscribe) === \"function\") {\n this.pendingSubscriptions.map((subscription) => {\n logger.log(`SubscriptionGuarantor resubscribing ${subscription.identifier}`)\n this.subscriptions.subscribe(subscription)\n })\n }\n }\n , 500)\n }\n}\n\nexport default SubscriptionGuarantor", "import Subscription from \"./subscription\"\nimport SubscriptionGuarantor from \"./subscription_guarantor\"\nimport logger from \"./logger\"\n\n// Collection class for creating (and internally managing) channel subscriptions.\n// The only method intended to be triggered by the user is ActionCable.Subscriptions#create,\n// and it should be called through the consumer like so:\n//\n// App = {}\n// App.cable = ActionCable.createConsumer(\"ws://example.com/accounts/1\")\n// App.appearance = App.cable.subscriptions.create(\"AppearanceChannel\")\n//\n// For more details on how you'd configure an actual channel subscription, see ActionCable.Subscription.\n\nexport default class Subscriptions {\n constructor(consumer) {\n this.consumer = consumer\n this.guarantor = new SubscriptionGuarantor(this)\n this.subscriptions = []\n }\n\n create(channelName, mixin) {\n const channel = channelName\n const params = typeof channel === \"object\" ? 
channel : {channel}\n const subscription = new Subscription(this.consumer, params, mixin)\n return this.add(subscription)\n }\n\n // Private\n\n add(subscription) {\n this.subscriptions.push(subscription)\n this.consumer.ensureActiveConnection()\n this.notify(subscription, \"initialized\")\n this.subscribe(subscription)\n return subscription\n }\n\n remove(subscription) {\n this.forget(subscription)\n if (!this.findAll(subscription.identifier).length) {\n this.sendCommand(subscription, \"unsubscribe\")\n }\n return subscription\n }\n\n reject(identifier) {\n return this.findAll(identifier).map((subscription) => {\n this.forget(subscription)\n this.notify(subscription, \"rejected\")\n return subscription\n })\n }\n\n forget(subscription) {\n this.guarantor.forget(subscription)\n this.subscriptions = (this.subscriptions.filter((s) => s !== subscription))\n return subscription\n }\n\n findAll(identifier) {\n return this.subscriptions.filter((s) => s.identifier === identifier)\n }\n\n reload() {\n return this.subscriptions.map((subscription) =>\n this.subscribe(subscription))\n }\n\n notifyAll(callbackName, ...args) {\n return this.subscriptions.map((subscription) =>\n this.notify(subscription, callbackName, ...args))\n }\n\n notify(subscription, callbackName, ...args) {\n let subscriptions\n if (typeof subscription === \"string\") {\n subscriptions = this.findAll(subscription)\n } else {\n subscriptions = [subscription]\n }\n\n return subscriptions.map((subscription) =>\n (typeof subscription[callbackName] === \"function\" ? subscription[callbackName](...args) : undefined))\n }\n\n subscribe(subscription) {\n if (this.sendCommand(subscription, \"subscribe\")) {\n this.guarantor.guarantee(subscription)\n }\n }\n\n confirmSubscription(identifier) {\n logger.log(`Subscription confirmed ${identifier}`)\n this.findAll(identifier).map((subscription) =>\n this.guarantor.forget(subscription))\n }\n\n sendCommand(subscription, command) {\n const {identifier} = subscription\n return this.consumer.send({command, identifier})\n }\n}\n", "import Connection from \"./connection\"\nimport Subscriptions from \"./subscriptions\"\n\n// The ActionCable.Consumer establishes the connection to a server-side Ruby Connection object. 
Once established,\n// the ActionCable.ConnectionMonitor will ensure that its properly maintained through heartbeats and checking for stale updates.\n// The Consumer instance is also the gateway to establishing subscriptions to desired channels through the #createSubscription\n// method.\n//\n// The following example shows how this can be set up:\n//\n// App = {}\n// App.cable = ActionCable.createConsumer(\"ws://example.com/accounts/1\")\n// App.appearance = App.cable.subscriptions.create(\"AppearanceChannel\")\n//\n// For more details on how you'd configure an actual channel subscription, see ActionCable.Subscription.\n//\n// When a consumer is created, it automatically connects with the server.\n//\n// To disconnect from the server, call\n//\n// App.cable.disconnect()\n//\n// and to restart the connection:\n//\n// App.cable.connect()\n//\n// Any channel subscriptions which existed prior to disconnecting will\n// automatically resubscribe.\n\nexport default class Consumer {\n constructor(url) {\n this._url = url\n this.subscriptions = new Subscriptions(this)\n this.connection = new Connection(this)\n this.subprotocols = []\n }\n\n get url() {\n return createWebSocketURL(this._url)\n }\n\n send(data) {\n return this.connection.send(data)\n }\n\n connect() {\n return this.connection.open()\n }\n\n disconnect() {\n return this.connection.close({allowReconnect: false})\n }\n\n ensureActiveConnection() {\n if (!this.connection.isActive()) {\n return this.connection.open()\n }\n }\n\n addSubProtocol(subprotocol) {\n this.subprotocols = [...this.subprotocols, subprotocol]\n }\n}\n\nexport function createWebSocketURL(url) {\n if (typeof url === \"function\") {\n url = url()\n }\n\n if (url && !/^wss?:/i.test(url)) {\n const a = document.createElement(\"a\")\n a.href = url\n // Fix populating Location properties in IE. Otherwise, protocol will be blank.\n a.href = a.href\n a.protocol = a.protocol.replace(\"http\", \"ws\")\n return a.href\n } else {\n return url\n }\n}\n", "import Connection from \"./connection\"\nimport ConnectionMonitor from \"./connection_monitor\"\nimport Consumer, { createWebSocketURL } from \"./consumer\"\nimport INTERNAL from \"./internal\"\nimport Subscription from \"./subscription\"\nimport Subscriptions from \"./subscriptions\"\nimport SubscriptionGuarantor from \"./subscription_guarantor\"\nimport adapters from \"./adapters\"\nimport logger from \"./logger\"\n\nexport {\n Connection,\n ConnectionMonitor,\n Consumer,\n INTERNAL,\n Subscription,\n Subscriptions,\n SubscriptionGuarantor,\n adapters,\n createWebSocketURL,\n logger,\n}\n\nexport function createConsumer(url = getConfig(\"url\") || INTERNAL.default_mount_path) {\n return new Consumer(url)\n}\n\nexport function getConfig(name) {\n const element = document.head.querySelector(`meta[name='action-cable-${name}']`)\n if (element) {\n return element.getAttribute(\"content\")\n }\n}\n", "var win;\n\nif (typeof window !== \"undefined\") {\n win = window;\n} else if (typeof global !== \"undefined\") {\n win = global;\n} else if (typeof self !== \"undefined\"){\n win = self;\n} else {\n win = {};\n}\n\nmodule.exports = win;\n", "var topLevel = typeof global !== 'undefined' ? global :\n typeof window !== 'undefined' ? 
window : {}\nvar minDoc = require('min-document');\n\nvar doccy;\n\nif (typeof document !== 'undefined') {\n doccy = document;\n} else {\n doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'];\n\n if (!doccy) {\n doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'] = minDoc;\n }\n}\n\nmodule.exports = doccy;\n", "module.exports = SafeParseTuple\n\nfunction SafeParseTuple(obj, reviver) {\n var json\n var error = null\n\n try {\n json = JSON.parse(obj, reviver)\n } catch (err) {\n error = err\n }\n\n return [error, json]\n}\n", "function _extends() {\n return module.exports = _extends = Object.assign ? Object.assign.bind() : function (n) {\n for (var e = 1; e < arguments.length; e++) {\n var t = arguments[e];\n for (var r in t) ({}).hasOwnProperty.call(t, r) && (n[r] = t[r]);\n }\n return n;\n }, module.exports.__esModule = true, module.exports[\"default\"] = module.exports, _extends.apply(null, arguments);\n}\nmodule.exports = _extends, module.exports.__esModule = true, module.exports[\"default\"] = module.exports;", "module.exports = isFunction\n\nvar toString = Object.prototype.toString\n\nfunction isFunction (fn) {\n if (!fn) {\n return false\n }\n var string = toString.call(fn)\n return string === '[object Function]' ||\n (typeof fn === 'function' && string !== '[object RegExp]') ||\n (typeof window !== 'undefined' &&\n // IE8 and below\n (fn === window.setTimeout ||\n fn === window.alert ||\n fn === window.confirm ||\n fn === window.prompt))\n};\n", "\"use strict\";\n\nfunction _createForOfIteratorHelperLoose(o, allowArrayLike) { var it = typeof Symbol !== \"undefined\" && o[Symbol.iterator] || o[\"@@iterator\"]; if (it) return (it = it.call(o)).next.bind(it); if (Array.isArray(o) || (it = _unsupportedIterableToArray(o)) || allowArrayLike && o && typeof o.length === \"number\") { if (it) o = it; var i = 0; return function () { if (i >= o.length) return { done: true }; return { done: false, value: o[i++] }; }; } throw new TypeError(\"Invalid attempt to iterate non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.\"); }\n\nfunction _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === \"string\") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === \"Object\" && o.constructor) n = o.constructor.name; if (n === \"Map\" || n === \"Set\") return Array.from(o); if (n === \"Arguments\" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }\n\nfunction _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }\n\nvar InterceptorsStorage = /*#__PURE__*/function () {\n function InterceptorsStorage() {\n this.typeToInterceptorsMap_ = new Map();\n this.enabled_ = false;\n }\n\n var _proto = InterceptorsStorage.prototype;\n\n _proto.getIsEnabled = function getIsEnabled() {\n return this.enabled_;\n };\n\n _proto.enable = function enable() {\n this.enabled_ = true;\n };\n\n _proto.disable = function disable() {\n this.enabled_ = false;\n };\n\n _proto.reset = function reset() {\n this.typeToInterceptorsMap_ = new Map();\n this.enabled_ = false;\n };\n\n _proto.addInterceptor = function addInterceptor(type, interceptor) {\n if (!this.typeToInterceptorsMap_.has(type)) {\n this.typeToInterceptorsMap_.set(type, new Set());\n }\n\n var interceptorsSet = this.typeToInterceptorsMap_.get(type);\n\n if (interceptorsSet.has(interceptor)) {\n // 
already have this interceptor\n return false;\n }\n\n interceptorsSet.add(interceptor);\n return true;\n };\n\n _proto.removeInterceptor = function removeInterceptor(type, interceptor) {\n var interceptorsSet = this.typeToInterceptorsMap_.get(type);\n\n if (interceptorsSet && interceptorsSet.has(interceptor)) {\n interceptorsSet.delete(interceptor);\n return true;\n }\n\n return false;\n };\n\n _proto.clearInterceptorsByType = function clearInterceptorsByType(type) {\n var interceptorsSet = this.typeToInterceptorsMap_.get(type);\n\n if (!interceptorsSet) {\n return false;\n }\n\n this.typeToInterceptorsMap_.delete(type);\n this.typeToInterceptorsMap_.set(type, new Set());\n return true;\n };\n\n _proto.clear = function clear() {\n if (!this.typeToInterceptorsMap_.size) {\n return false;\n }\n\n this.typeToInterceptorsMap_ = new Map();\n return true;\n };\n\n _proto.getForType = function getForType(type) {\n return this.typeToInterceptorsMap_.get(type) || new Set();\n };\n\n _proto.execute = function execute(type, payload) {\n var interceptors = this.getForType(type);\n\n for (var _iterator = _createForOfIteratorHelperLoose(interceptors), _step; !(_step = _iterator()).done;) {\n var interceptor = _step.value;\n\n try {\n payload = interceptor(payload);\n } catch (e) {//ignore\n }\n }\n\n return payload;\n };\n\n return InterceptorsStorage;\n}();\n\nmodule.exports = InterceptorsStorage;", "\"use strict\";\n\nvar RetryManager = /*#__PURE__*/function () {\n function RetryManager() {\n this.maxAttempts_ = 1;\n this.delayFactor_ = 0.1;\n this.fuzzFactor_ = 0.1;\n this.initialDelay_ = 1000;\n this.enabled_ = false;\n }\n\n var _proto = RetryManager.prototype;\n\n _proto.getIsEnabled = function getIsEnabled() {\n return this.enabled_;\n };\n\n _proto.enable = function enable() {\n this.enabled_ = true;\n };\n\n _proto.disable = function disable() {\n this.enabled_ = false;\n };\n\n _proto.reset = function reset() {\n this.maxAttempts_ = 1;\n this.delayFactor_ = 0.1;\n this.fuzzFactor_ = 0.1;\n this.initialDelay_ = 1000;\n this.enabled_ = false;\n };\n\n _proto.getMaxAttempts = function getMaxAttempts() {\n return this.maxAttempts_;\n };\n\n _proto.setMaxAttempts = function setMaxAttempts(maxAttempts) {\n this.maxAttempts_ = maxAttempts;\n };\n\n _proto.getDelayFactor = function getDelayFactor() {\n return this.delayFactor_;\n };\n\n _proto.setDelayFactor = function setDelayFactor(delayFactor) {\n this.delayFactor_ = delayFactor;\n };\n\n _proto.getFuzzFactor = function getFuzzFactor() {\n return this.fuzzFactor_;\n };\n\n _proto.setFuzzFactor = function setFuzzFactor(fuzzFactor) {\n this.fuzzFactor_ = fuzzFactor;\n };\n\n _proto.getInitialDelay = function getInitialDelay() {\n return this.initialDelay_;\n };\n\n _proto.setInitialDelay = function setInitialDelay(initialDelay) {\n this.initialDelay_ = initialDelay;\n };\n\n _proto.createRetry = function createRetry(_temp) {\n var _ref = _temp === void 0 ? 
{} : _temp,\n maxAttempts = _ref.maxAttempts,\n delayFactor = _ref.delayFactor,\n fuzzFactor = _ref.fuzzFactor,\n initialDelay = _ref.initialDelay;\n\n return new Retry({\n maxAttempts: maxAttempts || this.maxAttempts_,\n delayFactor: delayFactor || this.delayFactor_,\n fuzzFactor: fuzzFactor || this.fuzzFactor_,\n initialDelay: initialDelay || this.initialDelay_\n });\n };\n\n return RetryManager;\n}();\n\nvar Retry = /*#__PURE__*/function () {\n function Retry(options) {\n this.maxAttempts_ = options.maxAttempts;\n this.delayFactor_ = options.delayFactor;\n this.fuzzFactor_ = options.fuzzFactor;\n this.currentDelay_ = options.initialDelay;\n this.currentAttempt_ = 1;\n }\n\n var _proto2 = Retry.prototype;\n\n _proto2.moveToNextAttempt = function moveToNextAttempt() {\n this.currentAttempt_++;\n var delayDelta = this.currentDelay_ * this.delayFactor_;\n this.currentDelay_ = this.currentDelay_ + delayDelta;\n };\n\n _proto2.shouldRetry = function shouldRetry() {\n return this.currentAttempt_ < this.maxAttempts_;\n };\n\n _proto2.getCurrentDelay = function getCurrentDelay() {\n return this.currentDelay_;\n };\n\n _proto2.getCurrentMinPossibleDelay = function getCurrentMinPossibleDelay() {\n return (1 - this.fuzzFactor_) * this.currentDelay_;\n };\n\n _proto2.getCurrentMaxPossibleDelay = function getCurrentMaxPossibleDelay() {\n return (1 + this.fuzzFactor_) * this.currentDelay_;\n }\n /**\n * For example fuzzFactor is 0.1\n * This means \u00B110% deviation\n * So if we have delay as 1000\n * This function can generate any value from 900 to 1100\n */\n ;\n\n _proto2.getCurrentFuzzedDelay = function getCurrentFuzzedDelay() {\n var lowValue = this.getCurrentMinPossibleDelay();\n var highValue = this.getCurrentMaxPossibleDelay();\n return lowValue + Math.random() * (highValue - lowValue);\n };\n\n return Retry;\n}();\n\nmodule.exports = RetryManager;", "\"use strict\";\n\nvar window = require('global/window');\n\nvar httpResponseHandler = function httpResponseHandler(callback, decodeResponseBody) {\n if (decodeResponseBody === void 0) {\n decodeResponseBody = false;\n }\n\n return function (err, response, responseBody) {\n // if the XHR failed, return that error\n if (err) {\n callback(err);\n return;\n } // if the HTTP status code is 4xx or 5xx, the request also failed\n\n\n if (response.statusCode >= 400 && response.statusCode <= 599) {\n var cause = responseBody;\n\n if (decodeResponseBody) {\n if (window.TextDecoder) {\n var charset = getCharset(response.headers && response.headers['content-type']);\n\n try {\n cause = new TextDecoder(charset).decode(responseBody);\n } catch (e) {}\n } else {\n cause = String.fromCharCode.apply(null, new Uint8Array(responseBody));\n }\n }\n\n callback({\n cause: cause\n });\n return;\n } // otherwise, request succeeded\n\n\n callback(null, responseBody);\n };\n};\n\nfunction getCharset(contentTypeHeader) {\n if (contentTypeHeader === void 0) {\n contentTypeHeader = '';\n }\n\n return contentTypeHeader.toLowerCase().split(';').reduce(function (charset, contentType) {\n var _contentType$split = contentType.split('='),\n type = _contentType$split[0],\n value = _contentType$split[1];\n\n if (type.trim() === 'charset') {\n return value.trim();\n }\n\n return charset;\n }, 'utf-8');\n}\n\nmodule.exports = httpResponseHandler;", "\"use strict\";\n\nvar window = require(\"global/window\");\n\nvar _extends = require(\"@babel/runtime/helpers/extends\");\n\nvar isFunction = require('is-function');\n\nvar InterceptorsStorage = require('./interceptors.js');\n\nvar 
RetryManager = require(\"./retry.js\");\n\ncreateXHR.httpHandler = require('./http-handler.js');\ncreateXHR.requestInterceptorsStorage = new InterceptorsStorage();\ncreateXHR.responseInterceptorsStorage = new InterceptorsStorage();\ncreateXHR.retryManager = new RetryManager();\n/**\n * @license\n * slighly modified parse-headers 2.0.2 \n * Copyright (c) 2014 David Bj\u00F6rklund\n * Available under the MIT license\n * \n */\n\nvar parseHeaders = function parseHeaders(headers) {\n var result = {};\n\n if (!headers) {\n return result;\n }\n\n headers.trim().split('\\n').forEach(function (row) {\n var index = row.indexOf(':');\n var key = row.slice(0, index).trim().toLowerCase();\n var value = row.slice(index + 1).trim();\n\n if (typeof result[key] === 'undefined') {\n result[key] = value;\n } else if (Array.isArray(result[key])) {\n result[key].push(value);\n } else {\n result[key] = [result[key], value];\n }\n });\n return result;\n};\n\nmodule.exports = createXHR; // Allow use of default import syntax in TypeScript\n\nmodule.exports.default = createXHR;\ncreateXHR.XMLHttpRequest = window.XMLHttpRequest || noop;\ncreateXHR.XDomainRequest = \"withCredentials\" in new createXHR.XMLHttpRequest() ? createXHR.XMLHttpRequest : window.XDomainRequest;\nforEachArray([\"get\", \"put\", \"post\", \"patch\", \"head\", \"delete\"], function (method) {\n createXHR[method === \"delete\" ? \"del\" : method] = function (uri, options, callback) {\n options = initParams(uri, options, callback);\n options.method = method.toUpperCase();\n return _createXHR(options);\n };\n});\n\nfunction forEachArray(array, iterator) {\n for (var i = 0; i < array.length; i++) {\n iterator(array[i]);\n }\n}\n\nfunction isEmpty(obj) {\n for (var i in obj) {\n if (obj.hasOwnProperty(i)) return false;\n }\n\n return true;\n}\n\nfunction initParams(uri, options, callback) {\n var params = uri;\n\n if (isFunction(options)) {\n callback = options;\n\n if (typeof uri === \"string\") {\n params = {\n uri: uri\n };\n }\n } else {\n params = _extends({}, options, {\n uri: uri\n });\n }\n\n params.callback = callback;\n return params;\n}\n\nfunction createXHR(uri, options, callback) {\n options = initParams(uri, options, callback);\n return _createXHR(options);\n}\n\nfunction _createXHR(options) {\n if (typeof options.callback === \"undefined\") {\n throw new Error(\"callback argument missing\");\n } // call all registered request interceptors for a given request type:\n\n\n if (options.requestType && createXHR.requestInterceptorsStorage.getIsEnabled()) {\n var requestInterceptorPayload = {\n uri: options.uri || options.url,\n headers: options.headers || {},\n body: options.body,\n metadata: options.metadata || {},\n retry: options.retry,\n timeout: options.timeout\n };\n var updatedPayload = createXHR.requestInterceptorsStorage.execute(options.requestType, requestInterceptorPayload);\n options.uri = updatedPayload.uri;\n options.headers = updatedPayload.headers;\n options.body = updatedPayload.body;\n options.metadata = updatedPayload.metadata;\n options.retry = updatedPayload.retry;\n options.timeout = updatedPayload.timeout;\n }\n\n var called = false;\n\n var callback = function cbOnce(err, response, body) {\n if (!called) {\n called = true;\n options.callback(err, response, body);\n }\n };\n\n function readystatechange() {\n // do not call load 2 times when response interceptors are enabled\n // why do we even need this 2nd load?\n if (xhr.readyState === 4 && !createXHR.responseInterceptorsStorage.getIsEnabled()) {\n 
setTimeout(loadFunc, 0);\n }\n }\n\n function getBody() {\n // Chrome with requestType=blob throws errors arround when even testing access to responseText\n var body = undefined;\n\n if (xhr.response) {\n body = xhr.response;\n } else {\n body = xhr.responseText || getXml(xhr);\n }\n\n if (isJson) {\n try {\n body = JSON.parse(body);\n } catch (e) {}\n }\n\n return body;\n }\n\n function errorFunc(evt) {\n clearTimeout(timeoutTimer);\n clearTimeout(options.retryTimeout);\n\n if (!(evt instanceof Error)) {\n evt = new Error(\"\" + (evt || \"Unknown XMLHttpRequest Error\"));\n }\n\n evt.statusCode = 0; // we would like to retry on error:\n\n if (!aborted && createXHR.retryManager.getIsEnabled() && options.retry && options.retry.shouldRetry()) {\n options.retryTimeout = setTimeout(function () {\n options.retry.moveToNextAttempt(); // we want to re-use the same options and the same xhr object:\n\n options.xhr = xhr;\n\n _createXHR(options);\n }, options.retry.getCurrentFuzzedDelay());\n return;\n } // call all registered response interceptors for a given request type:\n\n\n if (options.requestType && createXHR.responseInterceptorsStorage.getIsEnabled()) {\n var responseInterceptorPayload = {\n headers: failureResponse.headers || {},\n body: failureResponse.body,\n responseUrl: xhr.responseURL,\n responseType: xhr.responseType\n };\n\n var _updatedPayload = createXHR.responseInterceptorsStorage.execute(options.requestType, responseInterceptorPayload);\n\n failureResponse.body = _updatedPayload.body;\n failureResponse.headers = _updatedPayload.headers;\n }\n\n return callback(evt, failureResponse);\n } // will load the data & process the response in a special response object\n\n\n function loadFunc() {\n if (aborted) return;\n var status;\n clearTimeout(timeoutTimer);\n clearTimeout(options.retryTimeout);\n\n if (options.useXDR && xhr.status === undefined) {\n //IE8 CORS GET successful response doesn't have a status field, but body is fine\n status = 200;\n } else {\n status = xhr.status === 1223 ? 
204 : xhr.status;\n }\n\n var response = failureResponse;\n var err = null;\n\n if (status !== 0) {\n response = {\n body: getBody(),\n statusCode: status,\n method: method,\n headers: {},\n url: uri,\n rawRequest: xhr\n };\n\n if (xhr.getAllResponseHeaders) {\n //remember xhr can in fact be XDR for CORS in IE\n response.headers = parseHeaders(xhr.getAllResponseHeaders());\n }\n } else {\n err = new Error(\"Internal XMLHttpRequest Error\");\n } // call all registered response interceptors for a given request type:\n\n\n if (options.requestType && createXHR.responseInterceptorsStorage.getIsEnabled()) {\n var responseInterceptorPayload = {\n headers: response.headers || {},\n body: response.body,\n responseUrl: xhr.responseURL,\n responseType: xhr.responseType\n };\n\n var _updatedPayload2 = createXHR.responseInterceptorsStorage.execute(options.requestType, responseInterceptorPayload);\n\n response.body = _updatedPayload2.body;\n response.headers = _updatedPayload2.headers;\n }\n\n return callback(err, response, response.body);\n }\n\n var xhr = options.xhr || null;\n\n if (!xhr) {\n if (options.cors || options.useXDR) {\n xhr = new createXHR.XDomainRequest();\n } else {\n xhr = new createXHR.XMLHttpRequest();\n }\n }\n\n var key;\n var aborted;\n var uri = xhr.url = options.uri || options.url;\n var method = xhr.method = options.method || \"GET\";\n var body = options.body || options.data;\n var headers = xhr.headers = options.headers || {};\n var sync = !!options.sync;\n var isJson = false;\n var timeoutTimer;\n var failureResponse = {\n body: undefined,\n headers: {},\n statusCode: 0,\n method: method,\n url: uri,\n rawRequest: xhr\n };\n\n if (\"json\" in options && options.json !== false) {\n isJson = true;\n headers[\"accept\"] || headers[\"Accept\"] || (headers[\"Accept\"] = \"application/json\"); //Don't override existing accept header declared by user\n\n if (method !== \"GET\" && method !== \"HEAD\") {\n headers[\"content-type\"] || headers[\"Content-Type\"] || (headers[\"Content-Type\"] = \"application/json\"); //Don't override existing accept header declared by user\n\n body = JSON.stringify(options.json === true ? body : options.json);\n }\n }\n\n xhr.onreadystatechange = readystatechange;\n xhr.onload = loadFunc;\n xhr.onerror = errorFunc; // IE9 must have onprogress be set to a unique function.\n\n xhr.onprogress = function () {// IE must die\n };\n\n xhr.onabort = function () {\n aborted = true;\n clearTimeout(options.retryTimeout);\n };\n\n xhr.ontimeout = errorFunc;\n xhr.open(method, uri, !sync, options.username, options.password); //has to be after open\n\n if (!sync) {\n xhr.withCredentials = !!options.withCredentials;\n } // Cannot set timeout with sync request\n // not setting timeout on the xhr object, because of old webkits etc. 
not handling that correctly\n // both npm's request and jquery 1.x use this kind of timeout, so this is being consistent\n\n\n if (!sync && options.timeout > 0) {\n timeoutTimer = setTimeout(function () {\n if (aborted) return;\n aborted = true; //IE9 may still call readystatechange\n\n xhr.abort(\"timeout\");\n var e = new Error(\"XMLHttpRequest timeout\");\n e.code = \"ETIMEDOUT\";\n errorFunc(e);\n }, options.timeout);\n }\n\n if (xhr.setRequestHeader) {\n for (key in headers) {\n if (headers.hasOwnProperty(key)) {\n xhr.setRequestHeader(key, headers[key]);\n }\n }\n } else if (options.headers && !isEmpty(options.headers)) {\n throw new Error(\"Headers cannot be set on an XDomainRequest object\");\n }\n\n if (\"responseType\" in options) {\n xhr.responseType = options.responseType;\n }\n\n if (\"beforeSend\" in options && typeof options.beforeSend === \"function\") {\n options.beforeSend(xhr);\n } // Microsoft Edge browser sends \"undefined\" when send is called with undefined value.\n // XMLHttpRequest spec says to pass null as body to indicate no body\n // See https://github.com/naugtur/xhr/issues/100.\n\n\n xhr.send(body || null);\n return xhr;\n}\n\nfunction getXml(xhr) {\n // xhr.responseXML will throw Exception \"InvalidStateError\" or \"DOMException\"\n // See https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/responseXML.\n try {\n if (xhr.responseType === \"document\") {\n return xhr.responseXML;\n }\n\n var firefoxBugTakenEffect = xhr.responseXML && xhr.responseXML.documentElement.nodeName === \"parsererror\";\n\n if (xhr.responseType === \"\" && !firefoxBugTakenEffect) {\n return xhr.responseXML;\n }\n } catch (e) {}\n\n return null;\n}\n\nfunction noop() {}", "/**\n * Copyright 2013 vtt.js Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/* -*- Mode: Java; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */\n/* vim: set shiftwidth=2 tabstop=2 autoindent cindent expandtab: */\nvar document = require('global/document');\n\nvar _objCreate = Object.create || (function() {\n function F() {}\n return function(o) {\n if (arguments.length !== 1) {\n throw new Error('Object.create shim only accepts one parameter.');\n }\n F.prototype = o;\n return new F();\n };\n})();\n\n// Creates a new ParserError object from an errorData object. The errorData\n// object should have default code and message properties. 
The default message\n// property can be overriden by passing in a message parameter.\n// See ParsingError.Errors below for acceptable errors.\nfunction ParsingError(errorData, message) {\n this.name = \"ParsingError\";\n this.code = errorData.code;\n this.message = message || errorData.message;\n}\nParsingError.prototype = _objCreate(Error.prototype);\nParsingError.prototype.constructor = ParsingError;\n\n// ParsingError metadata for acceptable ParsingErrors.\nParsingError.Errors = {\n BadSignature: {\n code: 0,\n message: \"Malformed WebVTT signature.\"\n },\n BadTimeStamp: {\n code: 1,\n message: \"Malformed time stamp.\"\n }\n};\n\n// Try to parse input as a time stamp.\nfunction parseTimeStamp(input) {\n\n function computeSeconds(h, m, s, f) {\n return (h | 0) * 3600 + (m | 0) * 60 + (s | 0) + (f | 0) / 1000;\n }\n\n var m = input.match(/^(\\d+):(\\d{1,2})(:\\d{1,2})?\\.(\\d{3})/);\n if (!m) {\n return null;\n }\n\n if (m[3]) {\n // Timestamp takes the form of [hours]:[minutes]:[seconds].[milliseconds]\n return computeSeconds(m[1], m[2], m[3].replace(\":\", \"\"), m[4]);\n } else if (m[1] > 59) {\n // Timestamp takes the form of [hours]:[minutes].[milliseconds]\n // First position is hours as it's over 59.\n return computeSeconds(m[1], m[2], 0, m[4]);\n } else {\n // Timestamp takes the form of [minutes]:[seconds].[milliseconds]\n return computeSeconds(0, m[1], m[2], m[4]);\n }\n}\n\n// A settings object holds key/value pairs and will ignore anything but the first\n// assignment to a specific key.\nfunction Settings() {\n this.values = _objCreate(null);\n}\n\nSettings.prototype = {\n // Only accept the first assignment to any key.\n set: function(k, v) {\n if (!this.get(k) && v !== \"\") {\n this.values[k] = v;\n }\n },\n // Return the value for a key, or a default value.\n // If 'defaultKey' is passed then 'dflt' is assumed to be an object with\n // a number of possible default values as properties where 'defaultKey' is\n // the key of the property that will be chosen; otherwise it's assumed to be\n // a single value.\n get: function(k, dflt, defaultKey) {\n if (defaultKey) {\n return this.has(k) ? this.values[k] : dflt[defaultKey];\n }\n return this.has(k) ? this.values[k] : dflt;\n },\n // Check whether we have a value for a key.\n has: function(k) {\n return k in this.values;\n },\n // Accept a setting if its one of the given alternatives.\n alt: function(k, v, a) {\n for (var n = 0; n < a.length; ++n) {\n if (v === a[n]) {\n this.set(k, v);\n break;\n }\n }\n },\n // Accept a setting if its a valid (signed) integer.\n integer: function(k, v) {\n if (/^-?\\d+$/.test(v)) { // integer\n this.set(k, parseInt(v, 10));\n }\n },\n // Accept a setting if its a valid percentage.\n percent: function(k, v) {\n var m;\n if ((m = v.match(/^([\\d]{1,3})(\\.[\\d]*)?%$/))) {\n v = parseFloat(v);\n if (v >= 0 && v <= 100) {\n this.set(k, v);\n return true;\n }\n }\n return false;\n }\n};\n\n// Helper function to parse input into groups separated by 'groupDelim', and\n// interprete each group as a key/value pair separated by 'keyValueDelim'.\nfunction parseOptions(input, callback, keyValueDelim, groupDelim) {\n var groups = groupDelim ? 
input.split(groupDelim) : [input];\n for (var i in groups) {\n if (typeof groups[i] !== \"string\") {\n continue;\n }\n var kv = groups[i].split(keyValueDelim);\n if (kv.length !== 2) {\n continue;\n }\n var k = kv[0].trim();\n var v = kv[1].trim();\n callback(k, v);\n }\n}\n\nfunction parseCue(input, cue, regionList) {\n // Remember the original input if we need to throw an error.\n var oInput = input;\n // 4.1 WebVTT timestamp\n function consumeTimeStamp() {\n var ts = parseTimeStamp(input);\n if (ts === null) {\n throw new ParsingError(ParsingError.Errors.BadTimeStamp,\n \"Malformed timestamp: \" + oInput);\n }\n // Remove time stamp from input.\n input = input.replace(/^[^\\sa-zA-Z-]+/, \"\");\n return ts;\n }\n\n // 4.4.2 WebVTT cue settings\n function consumeCueSettings(input, cue) {\n var settings = new Settings();\n\n parseOptions(input, function (k, v) {\n switch (k) {\n case \"region\":\n // Find the last region we parsed with the same region id.\n for (var i = regionList.length - 1; i >= 0; i--) {\n if (regionList[i].id === v) {\n settings.set(k, regionList[i].region);\n break;\n }\n }\n break;\n case \"vertical\":\n settings.alt(k, v, [\"rl\", \"lr\"]);\n break;\n case \"line\":\n var vals = v.split(\",\"),\n vals0 = vals[0];\n settings.integer(k, vals0);\n settings.percent(k, vals0) ? settings.set(\"snapToLines\", false) : null;\n settings.alt(k, vals0, [\"auto\"]);\n if (vals.length === 2) {\n settings.alt(\"lineAlign\", vals[1], [\"start\", \"center\", \"end\"]);\n }\n break;\n case \"position\":\n vals = v.split(\",\");\n settings.percent(k, vals[0]);\n if (vals.length === 2) {\n settings.alt(\"positionAlign\", vals[1], [\"start\", \"center\", \"end\"]);\n }\n break;\n case \"size\":\n settings.percent(k, v);\n break;\n case \"align\":\n settings.alt(k, v, [\"start\", \"center\", \"end\", \"left\", \"right\"]);\n break;\n }\n }, /:/, /\\s/);\n\n // Apply default values for any missing fields.\n cue.region = settings.get(\"region\", null);\n cue.vertical = settings.get(\"vertical\", \"\");\n try {\n cue.line = settings.get(\"line\", \"auto\");\n } catch (e) {}\n cue.lineAlign = settings.get(\"lineAlign\", \"start\");\n cue.snapToLines = settings.get(\"snapToLines\", true);\n cue.size = settings.get(\"size\", 100);\n // Safari still uses the old middle value and won't accept center\n try {\n cue.align = settings.get(\"align\", \"center\");\n } catch (e) {\n cue.align = settings.get(\"align\", \"middle\");\n }\n try {\n cue.position = settings.get(\"position\", \"auto\");\n } catch (e) {\n cue.position = settings.get(\"position\", {\n start: 0,\n left: 0,\n center: 50,\n middle: 50,\n end: 100,\n right: 100\n }, cue.align);\n }\n\n\n cue.positionAlign = settings.get(\"positionAlign\", {\n start: \"start\",\n left: \"start\",\n center: \"center\",\n middle: \"center\",\n end: \"end\",\n right: \"end\"\n }, cue.align);\n }\n\n function skipWhitespace() {\n input = input.replace(/^\\s+/, \"\");\n }\n\n // 4.1 WebVTT cue timings.\n skipWhitespace();\n cue.startTime = consumeTimeStamp(); // (1) collect cue start time\n skipWhitespace();\n if (input.substr(0, 3) !== \"-->\") { // (3) next characters must match \"-->\"\n throw new ParsingError(ParsingError.Errors.BadTimeStamp,\n \"Malformed time stamp (time stamps must be separated by '-->'): \" +\n oInput);\n }\n input = input.substr(3);\n skipWhitespace();\n cue.endTime = consumeTimeStamp(); // (5) collect cue end time\n\n // 4.1 WebVTT cue settings list.\n skipWhitespace();\n consumeCueSettings(input, cue);\n}\n\n// When 
evaluating this file as part of a Webpack bundle for server\n// side rendering, `document` is an empty object.\nvar TEXTAREA_ELEMENT = document.createElement && document.createElement(\"textarea\");\n\nvar TAG_NAME = {\n c: \"span\",\n i: \"i\",\n b: \"b\",\n u: \"u\",\n ruby: \"ruby\",\n rt: \"rt\",\n v: \"span\",\n lang: \"span\"\n};\n\n// 5.1 default text color\n// 5.2 default text background color is equivalent to text color with bg_ prefix\nvar DEFAULT_COLOR_CLASS = {\n white: 'rgba(255,255,255,1)',\n lime: 'rgba(0,255,0,1)',\n cyan: 'rgba(0,255,255,1)',\n red: 'rgba(255,0,0,1)',\n yellow: 'rgba(255,255,0,1)',\n magenta: 'rgba(255,0,255,1)',\n blue: 'rgba(0,0,255,1)',\n black: 'rgba(0,0,0,1)'\n};\n\nvar TAG_ANNOTATION = {\n v: \"title\",\n lang: \"lang\"\n};\n\nvar NEEDS_PARENT = {\n rt: \"ruby\"\n};\n\n// Parse content into a document fragment.\nfunction parseContent(window, input) {\n function nextToken() {\n // Check for end-of-string.\n if (!input) {\n return null;\n }\n\n // Consume 'n' characters from the input.\n function consume(result) {\n input = input.substr(result.length);\n return result;\n }\n\n var m = input.match(/^([^<]*)(<[^>]*>?)?/);\n // If there is some text before the next tag, return it, otherwise return\n // the tag.\n return consume(m[1] ? m[1] : m[2]);\n }\n\n function unescape(s) {\n TEXTAREA_ELEMENT.innerHTML = s;\n s = TEXTAREA_ELEMENT.textContent;\n TEXTAREA_ELEMENT.textContent = \"\";\n return s;\n }\n\n function shouldAdd(current, element) {\n return !NEEDS_PARENT[element.localName] ||\n NEEDS_PARENT[element.localName] === current.localName;\n }\n\n // Create an element for this tag.\n function createElement(type, annotation) {\n var tagName = TAG_NAME[type];\n if (!tagName) {\n return null;\n }\n var element = window.document.createElement(tagName);\n var name = TAG_ANNOTATION[type];\n if (name && annotation) {\n element[name] = annotation.trim();\n }\n return element;\n }\n\n var rootDiv = window.document.createElement(\"div\"),\n current = rootDiv,\n t,\n tagStack = [];\n\n while ((t = nextToken()) !== null) {\n if (t[0] === '<') {\n if (t[1] === \"/\") {\n // If the closing tag matches, move back up to the parent node.\n if (tagStack.length &&\n tagStack[tagStack.length - 1] === t.substr(2).replace(\">\", \"\")) {\n tagStack.pop();\n current = current.parentNode;\n }\n // Otherwise just ignore the end tag.\n continue;\n }\n var ts = parseTimeStamp(t.substr(1, t.length - 2));\n var node;\n if (ts) {\n // Timestamps are lead nodes as well.\n node = window.document.createProcessingInstruction(\"timestamp\", ts);\n current.appendChild(node);\n continue;\n }\n var m = t.match(/^<([^.\\s/0-9>]+)(\\.[^\\s\\\\>]+)?([^>\\\\]+)?(\\\\?)>?$/);\n // If we can't parse the tag, skip to the next tag.\n if (!m) {\n continue;\n }\n // Try to construct an element, and ignore the tag if we couldn't.\n node = createElement(m[1], m[3]);\n if (!node) {\n continue;\n }\n // Determine if the tag should be added based on the context of where it\n // is placed in the cuetext.\n if (!shouldAdd(current, node)) {\n continue;\n }\n // Set the class list (as a list of classes, separated by space).\n if (m[2]) {\n var classes = m[2].split('.');\n\n classes.forEach(function(cl) {\n var bgColor = /^bg_/.test(cl);\n // slice out `bg_` if it's a background color\n var colorName = bgColor ? cl.slice(3) : cl;\n\n if (DEFAULT_COLOR_CLASS.hasOwnProperty(colorName)) {\n var propName = bgColor ? 
'background-color' : 'color';\n var propValue = DEFAULT_COLOR_CLASS[colorName];\n\n node.style[propName] = propValue;\n }\n });\n\n node.className = classes.join(' ');\n }\n // Append the node to the current node, and enter the scope of the new\n // node.\n tagStack.push(m[1]);\n current.appendChild(node);\n current = node;\n continue;\n }\n\n // Text nodes are leaf nodes.\n current.appendChild(window.document.createTextNode(unescape(t)));\n }\n\n return rootDiv;\n}\n\n// This is a list of all the Unicode characters that have a strong\n// right-to-left category. What this means is that these characters are\n// written right-to-left for sure. It was generated by pulling all the strong\n// right-to-left characters out of the Unicode data table. That table can\n// found at: http://www.unicode.org/Public/UNIDATA/UnicodeData.txt\nvar strongRTLRanges = [[0x5be, 0x5be], [0x5c0, 0x5c0], [0x5c3, 0x5c3], [0x5c6, 0x5c6],\n [0x5d0, 0x5ea], [0x5f0, 0x5f4], [0x608, 0x608], [0x60b, 0x60b], [0x60d, 0x60d],\n [0x61b, 0x61b], [0x61e, 0x64a], [0x66d, 0x66f], [0x671, 0x6d5], [0x6e5, 0x6e6],\n [0x6ee, 0x6ef], [0x6fa, 0x70d], [0x70f, 0x710], [0x712, 0x72f], [0x74d, 0x7a5],\n [0x7b1, 0x7b1], [0x7c0, 0x7ea], [0x7f4, 0x7f5], [0x7fa, 0x7fa], [0x800, 0x815],\n [0x81a, 0x81a], [0x824, 0x824], [0x828, 0x828], [0x830, 0x83e], [0x840, 0x858],\n [0x85e, 0x85e], [0x8a0, 0x8a0], [0x8a2, 0x8ac], [0x200f, 0x200f],\n [0xfb1d, 0xfb1d], [0xfb1f, 0xfb28], [0xfb2a, 0xfb36], [0xfb38, 0xfb3c],\n [0xfb3e, 0xfb3e], [0xfb40, 0xfb41], [0xfb43, 0xfb44], [0xfb46, 0xfbc1],\n [0xfbd3, 0xfd3d], [0xfd50, 0xfd8f], [0xfd92, 0xfdc7], [0xfdf0, 0xfdfc],\n [0xfe70, 0xfe74], [0xfe76, 0xfefc], [0x10800, 0x10805], [0x10808, 0x10808],\n [0x1080a, 0x10835], [0x10837, 0x10838], [0x1083c, 0x1083c], [0x1083f, 0x10855],\n [0x10857, 0x1085f], [0x10900, 0x1091b], [0x10920, 0x10939], [0x1093f, 0x1093f],\n [0x10980, 0x109b7], [0x109be, 0x109bf], [0x10a00, 0x10a00], [0x10a10, 0x10a13],\n [0x10a15, 0x10a17], [0x10a19, 0x10a33], [0x10a40, 0x10a47], [0x10a50, 0x10a58],\n [0x10a60, 0x10a7f], [0x10b00, 0x10b35], [0x10b40, 0x10b55], [0x10b58, 0x10b72],\n [0x10b78, 0x10b7f], [0x10c00, 0x10c48], [0x1ee00, 0x1ee03], [0x1ee05, 0x1ee1f],\n [0x1ee21, 0x1ee22], [0x1ee24, 0x1ee24], [0x1ee27, 0x1ee27], [0x1ee29, 0x1ee32],\n [0x1ee34, 0x1ee37], [0x1ee39, 0x1ee39], [0x1ee3b, 0x1ee3b], [0x1ee42, 0x1ee42],\n [0x1ee47, 0x1ee47], [0x1ee49, 0x1ee49], [0x1ee4b, 0x1ee4b], [0x1ee4d, 0x1ee4f],\n [0x1ee51, 0x1ee52], [0x1ee54, 0x1ee54], [0x1ee57, 0x1ee57], [0x1ee59, 0x1ee59],\n [0x1ee5b, 0x1ee5b], [0x1ee5d, 0x1ee5d], [0x1ee5f, 0x1ee5f], [0x1ee61, 0x1ee62],\n [0x1ee64, 0x1ee64], [0x1ee67, 0x1ee6a], [0x1ee6c, 0x1ee72], [0x1ee74, 0x1ee77],\n [0x1ee79, 0x1ee7c], [0x1ee7e, 0x1ee7e], [0x1ee80, 0x1ee89], [0x1ee8b, 0x1ee9b],\n [0x1eea1, 0x1eea3], [0x1eea5, 0x1eea9], [0x1eeab, 0x1eebb], [0x10fffd, 0x10fffd]];\n\nfunction isStrongRTLChar(charCode) {\n for (var i = 0; i < strongRTLRanges.length; i++) {\n var currentRange = strongRTLRanges[i];\n if (charCode >= currentRange[0] && charCode <= currentRange[1]) {\n return true;\n }\n }\n\n return false;\n}\n\nfunction determineBidi(cueDiv) {\n var nodeStack = [],\n text = \"\",\n charCode;\n\n if (!cueDiv || !cueDiv.childNodes) {\n return \"ltr\";\n }\n\n function pushNodes(nodeStack, node) {\n for (var i = node.childNodes.length - 1; i >= 0; i--) {\n nodeStack.push(node.childNodes[i]);\n }\n }\n\n function nextTextNode(nodeStack) {\n if (!nodeStack || !nodeStack.length) {\n return null;\n }\n\n var node = nodeStack.pop(),\n text = node.textContent || 
node.innerText;\n if (text) {\n // TODO: This should match all unicode type B characters (paragraph\n // separator characters). See issue #115.\n var m = text.match(/^.*(\\n|\\r)/);\n if (m) {\n nodeStack.length = 0;\n return m[0];\n }\n return text;\n }\n if (node.tagName === \"ruby\") {\n return nextTextNode(nodeStack);\n }\n if (node.childNodes) {\n pushNodes(nodeStack, node);\n return nextTextNode(nodeStack);\n }\n }\n\n pushNodes(nodeStack, cueDiv);\n while ((text = nextTextNode(nodeStack))) {\n for (var i = 0; i < text.length; i++) {\n charCode = text.charCodeAt(i);\n if (isStrongRTLChar(charCode)) {\n return \"rtl\";\n }\n }\n }\n return \"ltr\";\n}\n\nfunction computeLinePos(cue) {\n if (typeof cue.line === \"number\" &&\n (cue.snapToLines || (cue.line >= 0 && cue.line <= 100))) {\n return cue.line;\n }\n if (!cue.track || !cue.track.textTrackList ||\n !cue.track.textTrackList.mediaElement) {\n return -1;\n }\n var track = cue.track,\n trackList = track.textTrackList,\n count = 0;\n for (var i = 0; i < trackList.length && trackList[i] !== track; i++) {\n if (trackList[i].mode === \"showing\") {\n count++;\n }\n }\n return ++count * -1;\n}\n\nfunction StyleBox() {\n}\n\n// Apply styles to a div. If there is no div passed then it defaults to the\n// div on 'this'.\nStyleBox.prototype.applyStyles = function(styles, div) {\n div = div || this.div;\n for (var prop in styles) {\n if (styles.hasOwnProperty(prop)) {\n div.style[prop] = styles[prop];\n }\n }\n};\n\nStyleBox.prototype.formatStyle = function(val, unit) {\n return val === 0 ? 0 : val + unit;\n};\n\n// Constructs the computed display state of the cue (a div). Places the div\n// into the overlay which should be a block level element (usually a div).\nfunction CueStyleBox(window, cue, styleOptions) {\n StyleBox.call(this);\n this.cue = cue;\n\n // Parse our cue's text into a DOM tree rooted at 'cueDiv'. This div will\n // have inline positioning and will function as the cue background box.\n this.cueDiv = parseContent(window, cue.text);\n var styles = {\n color: \"rgba(255, 255, 255, 1)\",\n backgroundColor: \"rgba(0, 0, 0, 0.8)\",\n position: \"relative\",\n left: 0,\n right: 0,\n top: 0,\n bottom: 0,\n display: \"inline\",\n writingMode: cue.vertical === \"\" ? \"horizontal-tb\"\n : cue.vertical === \"lr\" ? \"vertical-lr\"\n : \"vertical-rl\",\n unicodeBidi: \"plaintext\"\n };\n\n this.applyStyles(styles, this.cueDiv);\n\n // Create an absolutely positioned div that will be used to position the cue\n // div. Note, all WebVTT cue-setting alignments are equivalent to the CSS\n // mirrors of them except middle instead of center on Safari.\n this.div = window.document.createElement(\"div\");\n styles = {\n direction: determineBidi(this.cueDiv),\n writingMode: cue.vertical === \"\" ? \"horizontal-tb\"\n : cue.vertical === \"lr\" ? \"vertical-lr\"\n : \"vertical-rl\",\n unicodeBidi: \"plaintext\",\n textAlign: cue.align === \"middle\" ? \"center\" : cue.align,\n font: styleOptions.font,\n whiteSpace: \"pre-line\",\n position: \"absolute\"\n };\n\n this.applyStyles(styles);\n this.div.appendChild(this.cueDiv);\n\n // Calculate the distance from the reference edge of the viewport to the text\n // position of the cue box. 
The reference edge will be resolved later when\n // the box orientation styles are applied.\n var textPos = 0;\n switch (cue.positionAlign) {\n case \"start\":\n case \"line-left\":\n textPos = cue.position;\n break;\n case \"center\":\n textPos = cue.position - (cue.size / 2);\n break;\n case \"end\":\n case \"line-right\":\n textPos = cue.position - cue.size;\n break;\n }\n\n // Horizontal box orientation; textPos is the distance from the left edge of the\n // area to the left edge of the box and cue.size is the distance extending to\n // the right from there.\n if (cue.vertical === \"\") {\n this.applyStyles({\n left: this.formatStyle(textPos, \"%\"),\n width: this.formatStyle(cue.size, \"%\")\n });\n // Vertical box orientation; textPos is the distance from the top edge of the\n // area to the top edge of the box and cue.size is the height extending\n // downwards from there.\n } else {\n this.applyStyles({\n top: this.formatStyle(textPos, \"%\"),\n height: this.formatStyle(cue.size, \"%\")\n });\n }\n\n this.move = function(box) {\n this.applyStyles({\n top: this.formatStyle(box.top, \"px\"),\n bottom: this.formatStyle(box.bottom, \"px\"),\n left: this.formatStyle(box.left, \"px\"),\n right: this.formatStyle(box.right, \"px\"),\n height: this.formatStyle(box.height, \"px\"),\n width: this.formatStyle(box.width, \"px\")\n });\n };\n}\nCueStyleBox.prototype = _objCreate(StyleBox.prototype);\nCueStyleBox.prototype.constructor = CueStyleBox;\n\n// Represents the co-ordinates of an Element in a way that we can easily\n// compute things with such as if it overlaps or intersects with another Element.\n// Can initialize it with either a StyleBox or another BoxPosition.\nfunction BoxPosition(obj) {\n // Either a BoxPosition was passed in and we need to copy it, or a StyleBox\n // was passed in and we need to copy the results of 'getBoundingClientRect'\n // as the object returned is readonly. All co-ordinate values are in reference\n // to the viewport origin (top left).\n var lh, height, width, top;\n if (obj.div) {\n height = obj.div.offsetHeight;\n width = obj.div.offsetWidth;\n top = obj.div.offsetTop;\n\n var rects = (rects = obj.div.childNodes) && (rects = rects[0]) &&\n rects.getClientRects && rects.getClientRects();\n obj = obj.div.getBoundingClientRect();\n // In certain cases the outter div will be slightly larger then the sum of\n // the inner div's lines. This could be due to bold text, etc, on some platforms.\n // In this case we should get the average line height and use that. This will\n // result in the desired behaviour.\n lh = rects ? Math.max((rects[0] && rects[0].height) || 0, obj.height / rects.length)\n : 0;\n\n }\n this.left = obj.left;\n this.right = obj.right;\n this.top = obj.top || top;\n this.height = obj.height || height;\n this.bottom = obj.bottom || (top + (obj.height || height));\n this.width = obj.width || width;\n this.lineHeight = lh !== undefined ? lh : obj.lineHeight;\n}\n\n// Move the box along a particular axis. Optionally pass in an amount to move\n// the box. If no amount is passed then the default is the line height of the\n// box.\nBoxPosition.prototype.move = function(axis, toMove) {\n toMove = toMove !== undefined ? 
toMove : this.lineHeight;\n switch (axis) {\n case \"+x\":\n this.left += toMove;\n this.right += toMove;\n break;\n case \"-x\":\n this.left -= toMove;\n this.right -= toMove;\n break;\n case \"+y\":\n this.top += toMove;\n this.bottom += toMove;\n break;\n case \"-y\":\n this.top -= toMove;\n this.bottom -= toMove;\n break;\n }\n};\n\n// Check if this box overlaps another box, b2.\nBoxPosition.prototype.overlaps = function(b2) {\n return this.left < b2.right &&\n this.right > b2.left &&\n this.top < b2.bottom &&\n this.bottom > b2.top;\n};\n\n// Check if this box overlaps any other boxes in boxes.\nBoxPosition.prototype.overlapsAny = function(boxes) {\n for (var i = 0; i < boxes.length; i++) {\n if (this.overlaps(boxes[i])) {\n return true;\n }\n }\n return false;\n};\n\n// Check if this box is within another box.\nBoxPosition.prototype.within = function(container) {\n return this.top >= container.top &&\n this.bottom <= container.bottom &&\n this.left >= container.left &&\n this.right <= container.right;\n};\n\n// Check if this box is entirely within the container or it is overlapping\n// on the edge opposite of the axis direction passed. For example, if \"+x\" is\n// passed and the box is overlapping on the left edge of the container, then\n// return true.\nBoxPosition.prototype.overlapsOppositeAxis = function(container, axis) {\n switch (axis) {\n case \"+x\":\n return this.left < container.left;\n case \"-x\":\n return this.right > container.right;\n case \"+y\":\n return this.top < container.top;\n case \"-y\":\n return this.bottom > container.bottom;\n }\n};\n\n// Find the percentage of the area that this box is overlapping with another\n// box.\nBoxPosition.prototype.intersectPercentage = function(b2) {\n var x = Math.max(0, Math.min(this.right, b2.right) - Math.max(this.left, b2.left)),\n y = Math.max(0, Math.min(this.bottom, b2.bottom) - Math.max(this.top, b2.top)),\n intersectArea = x * y;\n return intersectArea / (this.height * this.width);\n};\n\n// Convert the positions from this box to CSS compatible positions using\n// the reference container's positions. This has to be done because this\n// box's positions are in reference to the viewport origin, whereas, CSS\n// values are in referecne to their respective edges.\nBoxPosition.prototype.toCSSCompatValues = function(reference) {\n return {\n top: this.top - reference.top,\n bottom: reference.bottom - this.bottom,\n left: this.left - reference.left,\n right: reference.right - this.right,\n height: this.height,\n width: this.width\n };\n};\n\n// Get an object that represents the box's position without anything extra.\n// Can pass a StyleBox, HTMLElement, or another BoxPositon.\nBoxPosition.getSimpleBoxPosition = function(obj) {\n var height = obj.div ? obj.div.offsetHeight : obj.tagName ? obj.offsetHeight : 0;\n var width = obj.div ? obj.div.offsetWidth : obj.tagName ? obj.offsetWidth : 0;\n var top = obj.div ? obj.div.offsetTop : obj.tagName ? obj.offsetTop : 0;\n\n obj = obj.div ? obj.div.getBoundingClientRect() :\n obj.tagName ? obj.getBoundingClientRect() : obj;\n var ret = {\n left: obj.left,\n right: obj.right,\n top: obj.top || top,\n height: obj.height || height,\n bottom: obj.bottom || (top + (obj.height || height)),\n width: obj.width || width\n };\n return ret;\n};\n\n// Move a StyleBox to its specified, or next best, position. The containerBox\n// is the box that contains the StyleBox, such as a div. 
boxPositions are\n// a list of other boxes that the styleBox can't overlap with.\nfunction moveBoxToLinePosition(window, styleBox, containerBox, boxPositions) {\n\n // Find the best position for a cue box, b, on the video. The axis parameter\n // is a list of axis, the order of which, it will move the box along. For example:\n // Passing [\"+x\", \"-x\"] will move the box first along the x axis in the positive\n // direction. If it doesn't find a good position for it there it will then move\n // it along the x axis in the negative direction.\n function findBestPosition(b, axis) {\n var bestPosition,\n specifiedPosition = new BoxPosition(b),\n percentage = 1; // Highest possible so the first thing we get is better.\n\n for (var i = 0; i < axis.length; i++) {\n while (b.overlapsOppositeAxis(containerBox, axis[i]) ||\n (b.within(containerBox) && b.overlapsAny(boxPositions))) {\n b.move(axis[i]);\n }\n // We found a spot where we aren't overlapping anything. This is our\n // best position.\n if (b.within(containerBox)) {\n return b;\n }\n var p = b.intersectPercentage(containerBox);\n // If we're outside the container box less then we were on our last try\n // then remember this position as the best position.\n if (percentage > p) {\n bestPosition = new BoxPosition(b);\n percentage = p;\n }\n // Reset the box position to the specified position.\n b = new BoxPosition(specifiedPosition);\n }\n return bestPosition || specifiedPosition;\n }\n\n var boxPosition = new BoxPosition(styleBox),\n cue = styleBox.cue,\n linePos = computeLinePos(cue),\n axis = [];\n\n // If we have a line number to align the cue to.\n if (cue.snapToLines) {\n var size;\n switch (cue.vertical) {\n case \"\":\n axis = [ \"+y\", \"-y\" ];\n size = \"height\";\n break;\n case \"rl\":\n axis = [ \"+x\", \"-x\" ];\n size = \"width\";\n break;\n case \"lr\":\n axis = [ \"-x\", \"+x\" ];\n size = \"width\";\n break;\n }\n\n var step = boxPosition.lineHeight,\n position = step * Math.round(linePos),\n maxPosition = containerBox[size] + step,\n initialAxis = axis[0];\n\n // If the specified intial position is greater then the max position then\n // clamp the box to the amount of steps it would take for the box to\n // reach the max position.\n if (Math.abs(position) > maxPosition) {\n position = position < 0 ? -1 : 1;\n position *= Math.ceil(maxPosition / step) * step;\n }\n\n // If computed line position returns negative then line numbers are\n // relative to the bottom of the video instead of the top. Therefore, we\n // need to increase our initial position by the length or width of the\n // video, depending on the writing direction, and reverse our axis directions.\n if (linePos < 0) {\n position += cue.vertical === \"\" ? containerBox.height : containerBox.width;\n axis = axis.reverse();\n }\n\n // Move the box to the specified position. 
This may not be its best\n // position.\n boxPosition.move(initialAxis, position);\n\n } else {\n // If we have a percentage line value for the cue.\n var calculatedPercentage = (boxPosition.lineHeight / containerBox.height) * 100;\n\n switch (cue.lineAlign) {\n case \"center\":\n linePos -= (calculatedPercentage / 2);\n break;\n case \"end\":\n linePos -= calculatedPercentage;\n break;\n }\n\n // Apply initial line position to the cue box.\n switch (cue.vertical) {\n case \"\":\n styleBox.applyStyles({\n top: styleBox.formatStyle(linePos, \"%\")\n });\n break;\n case \"rl\":\n styleBox.applyStyles({\n left: styleBox.formatStyle(linePos, \"%\")\n });\n break;\n case \"lr\":\n styleBox.applyStyles({\n right: styleBox.formatStyle(linePos, \"%\")\n });\n break;\n }\n\n axis = [ \"+y\", \"-x\", \"+x\", \"-y\" ];\n\n // Get the box position again after we've applied the specified positioning\n // to it.\n boxPosition = new BoxPosition(styleBox);\n }\n\n var bestPosition = findBestPosition(boxPosition, axis);\n styleBox.move(bestPosition.toCSSCompatValues(containerBox));\n}\n\nfunction WebVTT() {\n // Nothing\n}\n\n// Helper to allow strings to be decoded instead of the default binary utf8 data.\nWebVTT.StringDecoder = function() {\n return {\n decode: function(data) {\n if (!data) {\n return \"\";\n }\n if (typeof data !== \"string\") {\n throw new Error(\"Error - expected string data.\");\n }\n return decodeURIComponent(encodeURIComponent(data));\n }\n };\n};\n\nWebVTT.convertCueToDOMTree = function(window, cuetext) {\n if (!window || !cuetext) {\n return null;\n }\n return parseContent(window, cuetext);\n};\n\nvar FONT_SIZE_PERCENT = 0.05;\nvar FONT_STYLE = \"sans-serif\";\nvar CUE_BACKGROUND_PADDING = \"1.5%\";\n\n// Runs the processing model over the cues and regions passed to it.\n// @param overlay A block level element (usually a div) that the computed cues\n// and regions will be placed into.\nWebVTT.processCues = function(window, cues, overlay) {\n if (!window || !cues || !overlay) {\n return null;\n }\n\n // Remove all previous children.\n while (overlay.firstChild) {\n overlay.removeChild(overlay.firstChild);\n }\n\n var paddedOverlay = window.document.createElement(\"div\");\n paddedOverlay.style.position = \"absolute\";\n paddedOverlay.style.left = \"0\";\n paddedOverlay.style.right = \"0\";\n paddedOverlay.style.top = \"0\";\n paddedOverlay.style.bottom = \"0\";\n paddedOverlay.style.margin = CUE_BACKGROUND_PADDING;\n overlay.appendChild(paddedOverlay);\n\n // Determine if we need to compute the display states of the cues. This could\n // be the case if a cue's state has been changed since the last computation or\n // if it has not been computed yet.\n function shouldCompute(cues) {\n for (var i = 0; i < cues.length; i++) {\n if (cues[i].hasBeenReset || !cues[i].displayState) {\n return true;\n }\n }\n return false;\n }\n\n // We don't need to recompute the cues' display states. 
Just reuse them.\n if (!shouldCompute(cues)) {\n for (var i = 0; i < cues.length; i++) {\n paddedOverlay.appendChild(cues[i].displayState);\n }\n return;\n }\n\n var boxPositions = [],\n containerBox = BoxPosition.getSimpleBoxPosition(paddedOverlay),\n fontSize = Math.round(containerBox.height * FONT_SIZE_PERCENT * 100) / 100;\n var styleOptions = {\n font: fontSize + \"px \" + FONT_STYLE\n };\n\n (function() {\n var styleBox, cue;\n\n for (var i = 0; i < cues.length; i++) {\n cue = cues[i];\n\n // Compute the intial position and styles of the cue div.\n styleBox = new CueStyleBox(window, cue, styleOptions);\n paddedOverlay.appendChild(styleBox.div);\n\n // Move the cue div to it's correct line position.\n moveBoxToLinePosition(window, styleBox, containerBox, boxPositions);\n\n // Remember the computed div so that we don't have to recompute it later\n // if we don't have too.\n cue.displayState = styleBox.div;\n\n boxPositions.push(BoxPosition.getSimpleBoxPosition(styleBox));\n }\n })();\n};\n\nWebVTT.Parser = function(window, vttjs, decoder) {\n if (!decoder) {\n decoder = vttjs;\n vttjs = {};\n }\n if (!vttjs) {\n vttjs = {};\n }\n\n this.window = window;\n this.vttjs = vttjs;\n this.state = \"INITIAL\";\n this.buffer = \"\";\n this.decoder = decoder || new TextDecoder(\"utf8\");\n this.regionList = [];\n};\n\nWebVTT.Parser.prototype = {\n // If the error is a ParsingError then report it to the consumer if\n // possible. If it's not a ParsingError then throw it like normal.\n reportOrThrowError: function(e) {\n if (e instanceof ParsingError) {\n this.onparsingerror && this.onparsingerror(e);\n } else {\n throw e;\n }\n },\n parse: function (data) {\n var self = this;\n\n // If there is no data then we won't decode it, but will just try to parse\n // whatever is in buffer already. 
This may occur in circumstances, for\n // example when flush() is called.\n if (data) {\n // Try to decode the data that we received.\n self.buffer += self.decoder.decode(data, {stream: true});\n }\n\n function collectNextLine() {\n var buffer = self.buffer;\n var pos = 0;\n while (pos < buffer.length && buffer[pos] !== '\\r' && buffer[pos] !== '\\n') {\n ++pos;\n }\n var line = buffer.substr(0, pos);\n // Advance the buffer early in case we fail below.\n if (buffer[pos] === '\\r') {\n ++pos;\n }\n if (buffer[pos] === '\\n') {\n ++pos;\n }\n self.buffer = buffer.substr(pos);\n return line;\n }\n\n // 3.4 WebVTT region and WebVTT region settings syntax\n function parseRegion(input) {\n var settings = new Settings();\n\n parseOptions(input, function (k, v) {\n switch (k) {\n case \"id\":\n settings.set(k, v);\n break;\n case \"width\":\n settings.percent(k, v);\n break;\n case \"lines\":\n settings.integer(k, v);\n break;\n case \"regionanchor\":\n case \"viewportanchor\":\n var xy = v.split(',');\n if (xy.length !== 2) {\n break;\n }\n // We have to make sure both x and y parse, so use a temporary\n // settings object here.\n var anchor = new Settings();\n anchor.percent(\"x\", xy[0]);\n anchor.percent(\"y\", xy[1]);\n if (!anchor.has(\"x\") || !anchor.has(\"y\")) {\n break;\n }\n settings.set(k + \"X\", anchor.get(\"x\"));\n settings.set(k + \"Y\", anchor.get(\"y\"));\n break;\n case \"scroll\":\n settings.alt(k, v, [\"up\"]);\n break;\n }\n }, /=/, /\\s/);\n\n // Create the region, using default values for any values that were not\n // specified.\n if (settings.has(\"id\")) {\n var region = new (self.vttjs.VTTRegion || self.window.VTTRegion)();\n region.width = settings.get(\"width\", 100);\n region.lines = settings.get(\"lines\", 3);\n region.regionAnchorX = settings.get(\"regionanchorX\", 0);\n region.regionAnchorY = settings.get(\"regionanchorY\", 100);\n region.viewportAnchorX = settings.get(\"viewportanchorX\", 0);\n region.viewportAnchorY = settings.get(\"viewportanchorY\", 100);\n region.scroll = settings.get(\"scroll\", \"\");\n // Register the region.\n self.onregion && self.onregion(region);\n // Remember the VTTRegion for later in case we parse any VTTCues that\n // reference it.\n self.regionList.push({\n id: settings.get(\"id\"),\n region: region\n });\n }\n }\n\n // draft-pantos-http-live-streaming-20\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-20#section-3.5\n // 3.5 WebVTT\n function parseTimestampMap(input) {\n var settings = new Settings();\n\n parseOptions(input, function(k, v) {\n switch(k) {\n case \"MPEGT\":\n settings.integer(k + 'S', v);\n break;\n case \"LOCA\":\n settings.set(k + 'L', parseTimeStamp(v));\n break;\n }\n }, /[^\\d]:/, /,/);\n\n self.ontimestampmap && self.ontimestampmap({\n \"MPEGTS\": settings.get(\"MPEGTS\"),\n \"LOCAL\": settings.get(\"LOCAL\")\n });\n }\n\n // 3.2 WebVTT metadata header syntax\n function parseHeader(input) {\n if (input.match(/X-TIMESTAMP-MAP/)) {\n // This line contains HLS X-TIMESTAMP-MAP metadata\n parseOptions(input, function(k, v) {\n switch(k) {\n case \"X-TIMESTAMP-MAP\":\n parseTimestampMap(v);\n break;\n }\n }, /=/);\n } else {\n parseOptions(input, function (k, v) {\n switch (k) {\n case \"Region\":\n // 3.3 WebVTT region metadata header syntax\n parseRegion(v);\n break;\n }\n }, /:/);\n }\n\n }\n\n // 5.1 WebVTT file parsing.\n try {\n var line;\n if (self.state === \"INITIAL\") {\n // We can't start parsing until we have the first line.\n if (!/\\r\\n|\\n/.test(self.buffer)) {\n return this;\n 
}\n\n line = collectNextLine();\n\n var m = line.match(/^WEBVTT([ \\t].*)?$/);\n if (!m || !m[0]) {\n throw new ParsingError(ParsingError.Errors.BadSignature);\n }\n\n self.state = \"HEADER\";\n }\n\n var alreadyCollectedLine = false;\n while (self.buffer) {\n // We can't parse a line until we have the full line.\n if (!/\\r\\n|\\n/.test(self.buffer)) {\n return this;\n }\n\n if (!alreadyCollectedLine) {\n line = collectNextLine();\n } else {\n alreadyCollectedLine = false;\n }\n\n switch (self.state) {\n case \"HEADER\":\n // 13-18 - Allow a header (metadata) under the WEBVTT line.\n if (/:/.test(line)) {\n parseHeader(line);\n } else if (!line) {\n // An empty line terminates the header and starts the body (cues).\n self.state = \"ID\";\n }\n continue;\n case \"NOTE\":\n // Ignore NOTE blocks.\n if (!line) {\n self.state = \"ID\";\n }\n continue;\n case \"ID\":\n // Check for the start of NOTE blocks.\n if (/^NOTE($|[ \\t])/.test(line)) {\n self.state = \"NOTE\";\n break;\n }\n // 19-29 - Allow any number of line terminators, then initialize new cue values.\n if (!line) {\n continue;\n }\n self.cue = new (self.vttjs.VTTCue || self.window.VTTCue)(0, 0, \"\");\n // Safari still uses the old middle value and won't accept center\n try {\n self.cue.align = \"center\";\n } catch (e) {\n self.cue.align = \"middle\";\n }\n self.state = \"CUE\";\n // 30-39 - Check if self line contains an optional identifier or timing data.\n if (line.indexOf(\"-->\") === -1) {\n self.cue.id = line;\n continue;\n }\n // Process line as start of a cue.\n /*falls through*/\n case \"CUE\":\n // 40 - Collect cue timings and settings.\n try {\n parseCue(line, self.cue, self.regionList);\n } catch (e) {\n self.reportOrThrowError(e);\n // In case of an error ignore rest of the cue.\n self.cue = null;\n self.state = \"BADCUE\";\n continue;\n }\n self.state = \"CUETEXT\";\n continue;\n case \"CUETEXT\":\n var hasSubstring = line.indexOf(\"-->\") !== -1;\n // 34 - If we have an empty line then report the cue.\n // 35 - If we have the special substring '-->' then report the cue,\n // but do not collect the line as we need to process the current\n // one as a new cue.\n if (!line || hasSubstring && (alreadyCollectedLine = true)) {\n // We are done parsing self cue.\n self.oncue && self.oncue(self.cue);\n self.cue = null;\n self.state = \"ID\";\n continue;\n }\n if (self.cue.text) {\n self.cue.text += \"\\n\";\n }\n self.cue.text += line.replace(/\\u2028/g, '\\n').replace(/u2029/g, '\\n');\n continue;\n case \"BADCUE\": // BADCUE\n // 54-62 - Collect and discard the remaining cue.\n if (!line) {\n self.state = \"ID\";\n }\n continue;\n }\n }\n } catch (e) {\n self.reportOrThrowError(e);\n\n // If we are currently parsing a cue, report what we have.\n if (self.state === \"CUETEXT\" && self.cue && self.oncue) {\n self.oncue(self.cue);\n }\n self.cue = null;\n // Enter BADWEBVTT state if header was not parsed correctly otherwise\n // another exception occurred so enter BADCUE state.\n self.state = self.state === \"INITIAL\" ? 
\"BADWEBVTT\" : \"BADCUE\";\n }\n return this;\n },\n flush: function () {\n var self = this;\n try {\n // Finish decoding the stream.\n self.buffer += self.decoder.decode();\n // Synthesize the end of the current cue or region.\n if (self.cue || self.state === \"HEADER\") {\n self.buffer += \"\\n\\n\";\n self.parse();\n }\n // If we've flushed, parsed, and we're still on the INITIAL state then\n // that means we don't have enough of the stream to parse the first\n // line.\n if (self.state === \"INITIAL\") {\n throw new ParsingError(ParsingError.Errors.BadSignature);\n }\n } catch(e) {\n self.reportOrThrowError(e);\n }\n self.onflush && self.onflush();\n return this;\n }\n};\n\nmodule.exports = WebVTT;\n", "/**\n * Copyright 2013 vtt.js Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar autoKeyword = \"auto\";\nvar directionSetting = {\n \"\": 1,\n \"lr\": 1,\n \"rl\": 1\n};\nvar alignSetting = {\n \"start\": 1,\n \"center\": 1,\n \"end\": 1,\n \"left\": 1,\n \"right\": 1,\n \"auto\": 1,\n \"line-left\": 1,\n \"line-right\": 1\n};\n\nfunction findDirectionSetting(value) {\n if (typeof value !== \"string\") {\n return false;\n }\n var dir = directionSetting[value.toLowerCase()];\n return dir ? value.toLowerCase() : false;\n}\n\nfunction findAlignSetting(value) {\n if (typeof value !== \"string\") {\n return false;\n }\n var align = alignSetting[value.toLowerCase()];\n return align ? value.toLowerCase() : false;\n}\n\nfunction VTTCue(startTime, endTime, text) {\n /**\n * Shim implementation specific properties. These properties are not in\n * the spec.\n */\n\n // Lets us know when the VTTCue's data has changed in such a way that we need\n // to recompute its display state. 
This lets us compute its display state\n // lazily.\n this.hasBeenReset = false;\n\n /**\n * VTTCue and TextTrackCue properties\n * http://dev.w3.org/html5/webvtt/#vttcue-interface\n */\n\n var _id = \"\";\n var _pauseOnExit = false;\n var _startTime = startTime;\n var _endTime = endTime;\n var _text = text;\n var _region = null;\n var _vertical = \"\";\n var _snapToLines = true;\n var _line = \"auto\";\n var _lineAlign = \"start\";\n var _position = \"auto\";\n var _positionAlign = \"auto\";\n var _size = 100;\n var _align = \"center\";\n\n Object.defineProperties(this, {\n \"id\": {\n enumerable: true,\n get: function() {\n return _id;\n },\n set: function(value) {\n _id = \"\" + value;\n }\n },\n\n \"pauseOnExit\": {\n enumerable: true,\n get: function() {\n return _pauseOnExit;\n },\n set: function(value) {\n _pauseOnExit = !!value;\n }\n },\n\n \"startTime\": {\n enumerable: true,\n get: function() {\n return _startTime;\n },\n set: function(value) {\n if (typeof value !== \"number\") {\n throw new TypeError(\"Start time must be set to a number.\");\n }\n _startTime = value;\n this.hasBeenReset = true;\n }\n },\n\n \"endTime\": {\n enumerable: true,\n get: function() {\n return _endTime;\n },\n set: function(value) {\n if (typeof value !== \"number\") {\n throw new TypeError(\"End time must be set to a number.\");\n }\n _endTime = value;\n this.hasBeenReset = true;\n }\n },\n\n \"text\": {\n enumerable: true,\n get: function() {\n return _text;\n },\n set: function(value) {\n _text = \"\" + value;\n this.hasBeenReset = true;\n }\n },\n\n \"region\": {\n enumerable: true,\n get: function() {\n return _region;\n },\n set: function(value) {\n _region = value;\n this.hasBeenReset = true;\n }\n },\n\n \"vertical\": {\n enumerable: true,\n get: function() {\n return _vertical;\n },\n set: function(value) {\n var setting = findDirectionSetting(value);\n // Have to check for false because the setting an be an empty string.\n if (setting === false) {\n throw new SyntaxError(\"Vertical: an invalid or illegal direction string was specified.\");\n }\n _vertical = setting;\n this.hasBeenReset = true;\n }\n },\n\n \"snapToLines\": {\n enumerable: true,\n get: function() {\n return _snapToLines;\n },\n set: function(value) {\n _snapToLines = !!value;\n this.hasBeenReset = true;\n }\n },\n\n \"line\": {\n enumerable: true,\n get: function() {\n return _line;\n },\n set: function(value) {\n if (typeof value !== \"number\" && value !== autoKeyword) {\n throw new SyntaxError(\"Line: an invalid number or illegal string was specified.\");\n }\n _line = value;\n this.hasBeenReset = true;\n }\n },\n\n \"lineAlign\": {\n enumerable: true,\n get: function() {\n return _lineAlign;\n },\n set: function(value) {\n var setting = findAlignSetting(value);\n if (!setting) {\n console.warn(\"lineAlign: an invalid or illegal string was specified.\");\n } else {\n _lineAlign = setting;\n this.hasBeenReset = true;\n }\n }\n },\n\n \"position\": {\n enumerable: true,\n get: function() {\n return _position;\n },\n set: function(value) {\n if (value < 0 || value > 100) {\n throw new Error(\"Position must be between 0 and 100.\");\n }\n _position = value;\n this.hasBeenReset = true;\n }\n },\n\n \"positionAlign\": {\n enumerable: true,\n get: function() {\n return _positionAlign;\n },\n set: function(value) {\n var setting = findAlignSetting(value);\n if (!setting) {\n console.warn(\"positionAlign: an invalid or illegal string was specified.\");\n } else {\n _positionAlign = setting;\n this.hasBeenReset = true;\n }\n }\n 
},\n\n \"size\": {\n enumerable: true,\n get: function() {\n return _size;\n },\n set: function(value) {\n if (value < 0 || value > 100) {\n throw new Error(\"Size must be between 0 and 100.\");\n }\n _size = value;\n this.hasBeenReset = true;\n }\n },\n\n \"align\": {\n enumerable: true,\n get: function() {\n return _align;\n },\n set: function(value) {\n var setting = findAlignSetting(value);\n if (!setting) {\n throw new SyntaxError(\"align: an invalid or illegal alignment string was specified.\");\n }\n _align = setting;\n this.hasBeenReset = true;\n }\n }\n });\n\n /**\n * Other spec defined properties\n */\n\n // http://www.whatwg.org/specs/web-apps/current-work/multipage/the-video-element.html#text-track-cue-display-state\n this.displayState = undefined;\n}\n\n/**\n * VTTCue methods\n */\n\nVTTCue.prototype.getCueAsHTML = function() {\n // Assume WebVTT.convertCueToDOMTree is on the global.\n return WebVTT.convertCueToDOMTree(window, this.text);\n};\n\nmodule.exports = VTTCue;\n", "/**\n * Copyright 2013 vtt.js Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar scrollSetting = {\n \"\": true,\n \"up\": true\n};\n\nfunction findScrollSetting(value) {\n if (typeof value !== \"string\") {\n return false;\n }\n var scroll = scrollSetting[value.toLowerCase()];\n return scroll ? 
value.toLowerCase() : false;\n}\n\nfunction isValidPercentValue(value) {\n return typeof value === \"number\" && (value >= 0 && value <= 100);\n}\n\n// VTTRegion shim http://dev.w3.org/html5/webvtt/#vttregion-interface\nfunction VTTRegion() {\n var _width = 100;\n var _lines = 3;\n var _regionAnchorX = 0;\n var _regionAnchorY = 100;\n var _viewportAnchorX = 0;\n var _viewportAnchorY = 100;\n var _scroll = \"\";\n\n Object.defineProperties(this, {\n \"width\": {\n enumerable: true,\n get: function() {\n return _width;\n },\n set: function(value) {\n if (!isValidPercentValue(value)) {\n throw new Error(\"Width must be between 0 and 100.\");\n }\n _width = value;\n }\n },\n \"lines\": {\n enumerable: true,\n get: function() {\n return _lines;\n },\n set: function(value) {\n if (typeof value !== \"number\") {\n throw new TypeError(\"Lines must be set to a number.\");\n }\n _lines = value;\n }\n },\n \"regionAnchorY\": {\n enumerable: true,\n get: function() {\n return _regionAnchorY;\n },\n set: function(value) {\n if (!isValidPercentValue(value)) {\n throw new Error(\"RegionAnchorX must be between 0 and 100.\");\n }\n _regionAnchorY = value;\n }\n },\n \"regionAnchorX\": {\n enumerable: true,\n get: function() {\n return _regionAnchorX;\n },\n set: function(value) {\n if(!isValidPercentValue(value)) {\n throw new Error(\"RegionAnchorY must be between 0 and 100.\");\n }\n _regionAnchorX = value;\n }\n },\n \"viewportAnchorY\": {\n enumerable: true,\n get: function() {\n return _viewportAnchorY;\n },\n set: function(value) {\n if (!isValidPercentValue(value)) {\n throw new Error(\"ViewportAnchorY must be between 0 and 100.\");\n }\n _viewportAnchorY = value;\n }\n },\n \"viewportAnchorX\": {\n enumerable: true,\n get: function() {\n return _viewportAnchorX;\n },\n set: function(value) {\n if (!isValidPercentValue(value)) {\n throw new Error(\"ViewportAnchorX must be between 0 and 100.\");\n }\n _viewportAnchorX = value;\n }\n },\n \"scroll\": {\n enumerable: true,\n get: function() {\n return _scroll;\n },\n set: function(value) {\n var setting = findScrollSetting(value);\n // Have to check for false as an empty string is a legal value.\n if (setting === false) {\n console.warn(\"Scroll: an invalid or illegal string was specified.\");\n } else {\n _scroll = setting;\n }\n }\n }\n });\n}\n\nmodule.exports = VTTRegion;\n", "/**\n * Copyright 2013 vtt.js Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// Default exports for Node. Export the extended versions of VTTCue and\n// VTTRegion in Node since we likely want the capability to convert back and\n// forth between JSON. 
If we don't then it's not that big of a deal since we're\n// off browser.\n\nvar window = require('global/window');\n\nvar vttjs = module.exports = {\n WebVTT: require(\"./vtt.js\"),\n VTTCue: require(\"./vttcue.js\"),\n VTTRegion: require(\"./vttregion.js\")\n};\n\nwindow.vttjs = vttjs;\nwindow.WebVTT = vttjs.WebVTT;\n\nvar cueShim = vttjs.VTTCue;\nvar regionShim = vttjs.VTTRegion;\nvar nativeVTTCue = window.VTTCue;\nvar nativeVTTRegion = window.VTTRegion;\n\nvttjs.shim = function() {\n window.VTTCue = cueShim;\n window.VTTRegion = regionShim;\n};\n\nvttjs.restore = function() {\n window.VTTCue = nativeVTTCue;\n window.VTTRegion = nativeVTTRegion;\n};\n\nif (!window.VTTCue) {\n vttjs.shim();\n}\n", "// see https://tools.ietf.org/html/rfc1808\n\n(function (root) {\n var URL_REGEX =\n /^(?=((?:[a-zA-Z0-9+\\-.]+:)?))\\1(?=((?:\\/\\/[^\\/?#]*)?))\\2(?=((?:(?:[^?#\\/]*\\/)*[^;?#\\/]*)?))\\3((?:;[^?#]*)?)(\\?[^#]*)?(#[^]*)?$/;\n var FIRST_SEGMENT_REGEX = /^(?=([^\\/?#]*))\\1([^]*)$/;\n var SLASH_DOT_REGEX = /(?:\\/|^)\\.(?=\\/)/g;\n var SLASH_DOT_DOT_REGEX = /(?:\\/|^)\\.\\.\\/(?!\\.\\.\\/)[^\\/]*(?=\\/)/g;\n\n var URLToolkit = {\n // If opts.alwaysNormalize is true then the path will always be normalized even when it starts with / or //\n // E.g\n // With opts.alwaysNormalize = false (default, spec compliant)\n // http://a.com/b/cd + /e/f/../g => http://a.com/e/f/../g\n // With opts.alwaysNormalize = true (not spec compliant)\n // http://a.com/b/cd + /e/f/../g => http://a.com/e/g\n buildAbsoluteURL: function (baseURL, relativeURL, opts) {\n opts = opts || {};\n // remove any remaining space and CRLF\n baseURL = baseURL.trim();\n relativeURL = relativeURL.trim();\n if (!relativeURL) {\n // 2a) If the embedded URL is entirely empty, it inherits the\n // entire base URL (i.e., is set equal to the base URL)\n // and we are done.\n if (!opts.alwaysNormalize) {\n return baseURL;\n }\n var basePartsForNormalise = URLToolkit.parseURL(baseURL);\n if (!basePartsForNormalise) {\n throw new Error('Error trying to parse base URL.');\n }\n basePartsForNormalise.path = URLToolkit.normalizePath(\n basePartsForNormalise.path\n );\n return URLToolkit.buildURLFromParts(basePartsForNormalise);\n }\n var relativeParts = URLToolkit.parseURL(relativeURL);\n if (!relativeParts) {\n throw new Error('Error trying to parse relative URL.');\n }\n if (relativeParts.scheme) {\n // 2b) If the embedded URL starts with a scheme name, it is\n // interpreted as an absolute URL and we are done.\n if (!opts.alwaysNormalize) {\n return relativeURL;\n }\n relativeParts.path = URLToolkit.normalizePath(relativeParts.path);\n return URLToolkit.buildURLFromParts(relativeParts);\n }\n var baseParts = URLToolkit.parseURL(baseURL);\n if (!baseParts) {\n throw new Error('Error trying to parse base URL.');\n }\n if (!baseParts.netLoc && baseParts.path && baseParts.path[0] !== '/') {\n // If netLoc missing and path doesn't start with '/', assume everthing before the first '/' is the netLoc\n // This causes 'example.com/a' to be handled as '//example.com/a' instead of '/example.com/a'\n var pathParts = FIRST_SEGMENT_REGEX.exec(baseParts.path);\n baseParts.netLoc = pathParts[1];\n baseParts.path = pathParts[2];\n }\n if (baseParts.netLoc && !baseParts.path) {\n baseParts.path = '/';\n }\n var builtParts = {\n // 2c) Otherwise, the embedded URL inherits the scheme of\n // the base URL.\n scheme: baseParts.scheme,\n netLoc: relativeParts.netLoc,\n path: null,\n params: relativeParts.params,\n query: relativeParts.query,\n fragment: 
relativeParts.fragment,\n };\n if (!relativeParts.netLoc) {\n // 3) If the embedded URL's is non-empty, we skip to\n // Step 7. Otherwise, the embedded URL inherits the \n // (if any) of the base URL.\n builtParts.netLoc = baseParts.netLoc;\n // 4) If the embedded URL path is preceded by a slash \"/\", the\n // path is not relative and we skip to Step 7.\n if (relativeParts.path[0] !== '/') {\n if (!relativeParts.path) {\n // 5) If the embedded URL path is empty (and not preceded by a\n // slash), then the embedded URL inherits the base URL path\n builtParts.path = baseParts.path;\n // 5a) if the embedded URL's is non-empty, we skip to\n // step 7; otherwise, it inherits the of the base\n // URL (if any) and\n if (!relativeParts.params) {\n builtParts.params = baseParts.params;\n // 5b) if the embedded URL's is non-empty, we skip to\n // step 7; otherwise, it inherits the of the base\n // URL (if any) and we skip to step 7.\n if (!relativeParts.query) {\n builtParts.query = baseParts.query;\n }\n }\n } else {\n // 6) The last segment of the base URL's path (anything\n // following the rightmost slash \"/\", or the entire path if no\n // slash is present) is removed and the embedded URL's path is\n // appended in its place.\n var baseURLPath = baseParts.path;\n var newPath =\n baseURLPath.substring(0, baseURLPath.lastIndexOf('/') + 1) +\n relativeParts.path;\n builtParts.path = URLToolkit.normalizePath(newPath);\n }\n }\n }\n if (builtParts.path === null) {\n builtParts.path = opts.alwaysNormalize\n ? URLToolkit.normalizePath(relativeParts.path)\n : relativeParts.path;\n }\n return URLToolkit.buildURLFromParts(builtParts);\n },\n parseURL: function (url) {\n var parts = URL_REGEX.exec(url);\n if (!parts) {\n return null;\n }\n return {\n scheme: parts[1] || '',\n netLoc: parts[2] || '',\n path: parts[3] || '',\n params: parts[4] || '',\n query: parts[5] || '',\n fragment: parts[6] || '',\n };\n },\n normalizePath: function (path) {\n // The following operations are\n // then applied, in order, to the new path:\n // 6a) All occurrences of \"./\", where \".\" is a complete path\n // segment, are removed.\n // 6b) If the path ends with \".\" as a complete path segment,\n // that \".\" is removed.\n path = path.split('').reverse().join('').replace(SLASH_DOT_REGEX, '');\n // 6c) All occurrences of \"/../\", where is a\n // complete path segment not equal to \"..\", are removed.\n // Removal of these path segments is performed iteratively,\n // removing the leftmost matching pattern on each iteration,\n // until no matching pattern remains.\n // 6d) If the path ends with \"/..\", where is a\n // complete path segment not equal to \"..\", that\n // \"/..\" is removed.\n while (\n path.length !== (path = path.replace(SLASH_DOT_DOT_REGEX, '')).length\n ) {}\n return path.split('').reverse().join('');\n },\n buildURLFromParts: function (parts) {\n return (\n parts.scheme +\n parts.netLoc +\n parts.path +\n parts.params +\n parts.query +\n parts.fragment\n );\n },\n };\n\n if (typeof exports === 'object' && typeof module === 'object')\n module.exports = URLToolkit;\n else if (typeof define === 'function' && define.amd)\n define([], function () {\n return URLToolkit;\n });\n else if (typeof exports === 'object') exports['URLToolkit'] = URLToolkit;\n else root['URLToolkit'] = URLToolkit;\n})(this);\n", "'use strict'\n\n/**\n * Ponyfill for `Array.prototype.find` which is only available in ES6 runtimes.\n *\n * Works with anything that has a `length` property and index access properties, including 
NodeList.\n *\n * @template {unknown} T\n * @param {Array | ({length:number, [number]: T})} list\n * @param {function (item: T, index: number, list:Array | ({length:number, [number]: T})):boolean} predicate\n * @param {Partial>?} ac `Array.prototype` by default,\n * \t\t\t\tallows injecting a custom implementation in tests\n * @returns {T | undefined}\n *\n * @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/find\n * @see https://tc39.es/ecma262/multipage/indexed-collections.html#sec-array.prototype.find\n */\nfunction find(list, predicate, ac) {\n\tif (ac === undefined) {\n\t\tac = Array.prototype;\n\t}\n\tif (list && typeof ac.find === 'function') {\n\t\treturn ac.find.call(list, predicate);\n\t}\n\tfor (var i = 0; i < list.length; i++) {\n\t\tif (Object.prototype.hasOwnProperty.call(list, i)) {\n\t\t\tvar item = list[i];\n\t\t\tif (predicate.call(undefined, item, i, list)) {\n\t\t\t\treturn item;\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * \"Shallow freezes\" an object to render it immutable.\n * Uses `Object.freeze` if available,\n * otherwise the immutability is only in the type.\n *\n * Is used to create \"enum like\" objects.\n *\n * @template T\n * @param {T} object the object to freeze\n * @param {Pick = Object} oc `Object` by default,\n * \t\t\t\tallows to inject custom object constructor for tests\n * @returns {Readonly}\n *\n * @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/freeze\n */\nfunction freeze(object, oc) {\n\tif (oc === undefined) {\n\t\toc = Object\n\t}\n\treturn oc && typeof oc.freeze === 'function' ? oc.freeze(object) : object\n}\n\n/**\n * Since we can not rely on `Object.assign` we provide a simplified version\n * that is sufficient for our needs.\n *\n * @param {Object} target\n * @param {Object | null | undefined} source\n *\n * @returns {Object} target\n * @throws TypeError if target is not an object\n *\n * @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign\n * @see https://tc39.es/ecma262/multipage/fundamental-objects.html#sec-object.assign\n */\nfunction assign(target, source) {\n\tif (target === null || typeof target !== 'object') {\n\t\tthrow new TypeError('target is not an object')\n\t}\n\tfor (var key in source) {\n\t\tif (Object.prototype.hasOwnProperty.call(source, key)) {\n\t\t\ttarget[key] = source[key]\n\t\t}\n\t}\n\treturn target\n}\n\n/**\n * All mime types that are allowed as input to `DOMParser.parseFromString`\n *\n * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMParser/parseFromString#Argument02 MDN\n * @see https://html.spec.whatwg.org/multipage/dynamic-markup-insertion.html#domparsersupportedtype WHATWG HTML Spec\n * @see DOMParser.prototype.parseFromString\n */\nvar MIME_TYPE = freeze({\n\t/**\n\t * `text/html`, the only mime type that triggers treating an XML document as HTML.\n\t *\n\t * @see DOMParser.SupportedType.isHTML\n\t * @see https://www.iana.org/assignments/media-types/text/html IANA MimeType registration\n\t * @see https://en.wikipedia.org/wiki/HTML Wikipedia\n\t * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMParser/parseFromString MDN\n\t * @see https://html.spec.whatwg.org/multipage/dynamic-markup-insertion.html#dom-domparser-parsefromstring WHATWG HTML Spec\n\t */\n\tHTML: 'text/html',\n\n\t/**\n\t * Helper method to check a mime type if it indicates an HTML document\n\t *\n\t * @param {string} [value]\n\t * @returns {boolean}\n\t *\n\t * @see 
https://www.iana.org/assignments/media-types/text/html IANA MimeType registration\n\t * @see https://en.wikipedia.org/wiki/HTML Wikipedia\n\t * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMParser/parseFromString MDN\n\t * @see https://html.spec.whatwg.org/multipage/dynamic-markup-insertion.html#dom-domparser-parsefromstring \t */\n\tisHTML: function (value) {\n\t\treturn value === MIME_TYPE.HTML\n\t},\n\n\t/**\n\t * `application/xml`, the standard mime type for XML documents.\n\t *\n\t * @see https://www.iana.org/assignments/media-types/application/xml IANA MimeType registration\n\t * @see https://tools.ietf.org/html/rfc7303#section-9.1 RFC 7303\n\t * @see https://en.wikipedia.org/wiki/XML_and_MIME Wikipedia\n\t */\n\tXML_APPLICATION: 'application/xml',\n\n\t/**\n\t * `text/html`, an alias for `application/xml`.\n\t *\n\t * @see https://tools.ietf.org/html/rfc7303#section-9.2 RFC 7303\n\t * @see https://www.iana.org/assignments/media-types/text/xml IANA MimeType registration\n\t * @see https://en.wikipedia.org/wiki/XML_and_MIME Wikipedia\n\t */\n\tXML_TEXT: 'text/xml',\n\n\t/**\n\t * `application/xhtml+xml`, indicates an XML document that has the default HTML namespace,\n\t * but is parsed as an XML document.\n\t *\n\t * @see https://www.iana.org/assignments/media-types/application/xhtml+xml IANA MimeType registration\n\t * @see https://dom.spec.whatwg.org/#dom-domimplementation-createdocument WHATWG DOM Spec\n\t * @see https://en.wikipedia.org/wiki/XHTML Wikipedia\n\t */\n\tXML_XHTML_APPLICATION: 'application/xhtml+xml',\n\n\t/**\n\t * `image/svg+xml`,\n\t *\n\t * @see https://www.iana.org/assignments/media-types/image/svg+xml IANA MimeType registration\n\t * @see https://www.w3.org/TR/SVG11/ W3C SVG 1.1\n\t * @see https://en.wikipedia.org/wiki/Scalable_Vector_Graphics Wikipedia\n\t */\n\tXML_SVG_IMAGE: 'image/svg+xml',\n})\n\n/**\n * Namespaces that are used in this code base.\n *\n * @see http://www.w3.org/TR/REC-xml-names\n */\nvar NAMESPACE = freeze({\n\t/**\n\t * The XHTML namespace.\n\t *\n\t * @see http://www.w3.org/1999/xhtml\n\t */\n\tHTML: 'http://www.w3.org/1999/xhtml',\n\n\t/**\n\t * Checks if `uri` equals `NAMESPACE.HTML`.\n\t *\n\t * @param {string} [uri]\n\t *\n\t * @see NAMESPACE.HTML\n\t */\n\tisHTML: function (uri) {\n\t\treturn uri === NAMESPACE.HTML\n\t},\n\n\t/**\n\t * The SVG namespace.\n\t *\n\t * @see http://www.w3.org/2000/svg\n\t */\n\tSVG: 'http://www.w3.org/2000/svg',\n\n\t/**\n\t * The `xml:` namespace.\n\t *\n\t * @see http://www.w3.org/XML/1998/namespace\n\t */\n\tXML: 'http://www.w3.org/XML/1998/namespace',\n\n\t/**\n\t * The `xmlns:` namespace\n\t *\n\t * @see https://www.w3.org/2000/xmlns/\n\t */\n\tXMLNS: 'http://www.w3.org/2000/xmlns/',\n})\n\nexports.assign = assign;\nexports.find = find;\nexports.freeze = freeze;\nexports.MIME_TYPE = MIME_TYPE;\nexports.NAMESPACE = NAMESPACE;\n", "var conventions = require(\"./conventions\");\n\nvar find = conventions.find;\nvar NAMESPACE = conventions.NAMESPACE;\n\n/**\n * A prerequisite for `[].filter`, to drop elements that are empty\n * @param {string} input\n * @returns {boolean}\n */\nfunction notEmptyString (input) {\n\treturn input !== ''\n}\n/**\n * @see https://infra.spec.whatwg.org/#split-on-ascii-whitespace\n * @see https://infra.spec.whatwg.org/#ascii-whitespace\n *\n * @param {string} input\n * @returns {string[]} (can be empty)\n */\nfunction splitOnASCIIWhitespace(input) {\n\t// U+0009 TAB, U+000A LF, U+000C FF, U+000D CR, U+0020 SPACE\n\treturn input ? 
input.split(/[\\t\\n\\f\\r ]+/).filter(notEmptyString) : []\n}\n\n/**\n * Adds element as a key to current if it is not already present.\n *\n * @param {Record} current\n * @param {string} element\n * @returns {Record}\n */\nfunction orderedSetReducer (current, element) {\n\tif (!current.hasOwnProperty(element)) {\n\t\tcurrent[element] = true;\n\t}\n\treturn current;\n}\n\n/**\n * @see https://infra.spec.whatwg.org/#ordered-set\n * @param {string} input\n * @returns {string[]}\n */\nfunction toOrderedSet(input) {\n\tif (!input) return [];\n\tvar list = splitOnASCIIWhitespace(input);\n\treturn Object.keys(list.reduce(orderedSetReducer, {}))\n}\n\n/**\n * Uses `list.indexOf` to implement something like `Array.prototype.includes`,\n * which we can not rely on being available.\n *\n * @param {any[]} list\n * @returns {function(any): boolean}\n */\nfunction arrayIncludes (list) {\n\treturn function(element) {\n\t\treturn list && list.indexOf(element) !== -1;\n\t}\n}\n\nfunction copy(src,dest){\n\tfor(var p in src){\n\t\tif (Object.prototype.hasOwnProperty.call(src, p)) {\n\t\t\tdest[p] = src[p];\n\t\t}\n\t}\n}\n\n/**\n^\\w+\\.prototype\\.([_\\w]+)\\s*=\\s*((?:.*\\{\\s*?[\\r\\n][\\s\\S]*?^})|\\S.*?(?=[;\\r\\n]));?\n^\\w+\\.prototype\\.([_\\w]+)\\s*=\\s*(\\S.*?(?=[;\\r\\n]));?\n */\nfunction _extends(Class,Super){\n\tvar pt = Class.prototype;\n\tif(!(pt instanceof Super)){\n\t\tfunction t(){};\n\t\tt.prototype = Super.prototype;\n\t\tt = new t();\n\t\tcopy(pt,t);\n\t\tClass.prototype = pt = t;\n\t}\n\tif(pt.constructor != Class){\n\t\tif(typeof Class != 'function'){\n\t\t\tconsole.error(\"unknown Class:\"+Class)\n\t\t}\n\t\tpt.constructor = Class\n\t}\n}\n\n// Node Types\nvar NodeType = {}\nvar ELEMENT_NODE = NodeType.ELEMENT_NODE = 1;\nvar ATTRIBUTE_NODE = NodeType.ATTRIBUTE_NODE = 2;\nvar TEXT_NODE = NodeType.TEXT_NODE = 3;\nvar CDATA_SECTION_NODE = NodeType.CDATA_SECTION_NODE = 4;\nvar ENTITY_REFERENCE_NODE = NodeType.ENTITY_REFERENCE_NODE = 5;\nvar ENTITY_NODE = NodeType.ENTITY_NODE = 6;\nvar PROCESSING_INSTRUCTION_NODE = NodeType.PROCESSING_INSTRUCTION_NODE = 7;\nvar COMMENT_NODE = NodeType.COMMENT_NODE = 8;\nvar DOCUMENT_NODE = NodeType.DOCUMENT_NODE = 9;\nvar DOCUMENT_TYPE_NODE = NodeType.DOCUMENT_TYPE_NODE = 10;\nvar DOCUMENT_FRAGMENT_NODE = NodeType.DOCUMENT_FRAGMENT_NODE = 11;\nvar NOTATION_NODE = NodeType.NOTATION_NODE = 12;\n\n// ExceptionCode\nvar ExceptionCode = {}\nvar ExceptionMessage = {};\nvar INDEX_SIZE_ERR = ExceptionCode.INDEX_SIZE_ERR = ((ExceptionMessage[1]=\"Index size error\"),1);\nvar DOMSTRING_SIZE_ERR = ExceptionCode.DOMSTRING_SIZE_ERR = ((ExceptionMessage[2]=\"DOMString size error\"),2);\nvar HIERARCHY_REQUEST_ERR = ExceptionCode.HIERARCHY_REQUEST_ERR = ((ExceptionMessage[3]=\"Hierarchy request error\"),3);\nvar WRONG_DOCUMENT_ERR = ExceptionCode.WRONG_DOCUMENT_ERR = ((ExceptionMessage[4]=\"Wrong document\"),4);\nvar INVALID_CHARACTER_ERR = ExceptionCode.INVALID_CHARACTER_ERR = ((ExceptionMessage[5]=\"Invalid character\"),5);\nvar NO_DATA_ALLOWED_ERR = ExceptionCode.NO_DATA_ALLOWED_ERR = ((ExceptionMessage[6]=\"No data allowed\"),6);\nvar NO_MODIFICATION_ALLOWED_ERR = ExceptionCode.NO_MODIFICATION_ALLOWED_ERR = ((ExceptionMessage[7]=\"No modification allowed\"),7);\nvar NOT_FOUND_ERR = ExceptionCode.NOT_FOUND_ERR = ((ExceptionMessage[8]=\"Not found\"),8);\nvar NOT_SUPPORTED_ERR = ExceptionCode.NOT_SUPPORTED_ERR = ((ExceptionMessage[9]=\"Not supported\"),9);\nvar INUSE_ATTRIBUTE_ERR = ExceptionCode.INUSE_ATTRIBUTE_ERR = ((ExceptionMessage[10]=\"Attribute in 
use\"),10);\n//level2\nvar INVALID_STATE_ERR \t= ExceptionCode.INVALID_STATE_ERR \t= ((ExceptionMessage[11]=\"Invalid state\"),11);\nvar SYNTAX_ERR \t= ExceptionCode.SYNTAX_ERR \t= ((ExceptionMessage[12]=\"Syntax error\"),12);\nvar INVALID_MODIFICATION_ERR \t= ExceptionCode.INVALID_MODIFICATION_ERR \t= ((ExceptionMessage[13]=\"Invalid modification\"),13);\nvar NAMESPACE_ERR \t= ExceptionCode.NAMESPACE_ERR \t= ((ExceptionMessage[14]=\"Invalid namespace\"),14);\nvar INVALID_ACCESS_ERR \t= ExceptionCode.INVALID_ACCESS_ERR \t= ((ExceptionMessage[15]=\"Invalid access\"),15);\n\n/**\n * DOM Level 2\n * Object DOMException\n * @see http://www.w3.org/TR/2000/REC-DOM-Level-2-Core-20001113/ecma-script-binding.html\n * @see http://www.w3.org/TR/REC-DOM-Level-1/ecma-script-language-binding.html\n */\nfunction DOMException(code, message) {\n\tif(message instanceof Error){\n\t\tvar error = message;\n\t}else{\n\t\terror = this;\n\t\tError.call(this, ExceptionMessage[code]);\n\t\tthis.message = ExceptionMessage[code];\n\t\tif(Error.captureStackTrace) Error.captureStackTrace(this, DOMException);\n\t}\n\terror.code = code;\n\tif(message) this.message = this.message + \": \" + message;\n\treturn error;\n};\nDOMException.prototype = Error.prototype;\ncopy(ExceptionCode,DOMException)\n\n/**\n * @see http://www.w3.org/TR/2000/REC-DOM-Level-2-Core-20001113/core.html#ID-536297177\n * The NodeList interface provides the abstraction of an ordered collection of nodes, without defining or constraining how this collection is implemented. NodeList objects in the DOM are live.\n * The items in the NodeList are accessible via an integral index, starting from 0.\n */\nfunction NodeList() {\n};\nNodeList.prototype = {\n\t/**\n\t * The number of nodes in the list. The range of valid child node indices is 0 to length-1 inclusive.\n\t * @standard level1\n\t */\n\tlength:0,\n\t/**\n\t * Returns the indexth item in the collection. If index is greater than or equal to the number of nodes in the list, this returns null.\n\t * @standard level1\n\t * @param index unsigned long\n\t * Index into the collection.\n\t * @return Node\n\t * \tThe node at the indexth position in the NodeList, or null if that is not a valid index.\n\t */\n\titem: function(index) {\n\t\treturn index >= 0 && index < this.length ? 
this[index] : null;\n\t},\n\ttoString:function(isHTML,nodeFilter){\n\t\tfor(var buf = [], i = 0;i=0){\n\t\tvar lastIndex = list.length-1\n\t\twhile(i0 || key == 'xmlns'){\n//\t\t\treturn null;\n//\t\t}\n\t\t//console.log()\n\t\tvar i = this.length;\n\t\twhile(i--){\n\t\t\tvar attr = this[i];\n\t\t\t//console.log(attr.nodeName,key)\n\t\t\tif(attr.nodeName == key){\n\t\t\t\treturn attr;\n\t\t\t}\n\t\t}\n\t},\n\tsetNamedItem: function(attr) {\n\t\tvar el = attr.ownerElement;\n\t\tif(el && el!=this._ownerElement){\n\t\t\tthrow new DOMException(INUSE_ATTRIBUTE_ERR);\n\t\t}\n\t\tvar oldAttr = this.getNamedItem(attr.nodeName);\n\t\t_addNamedNode(this._ownerElement,this,attr,oldAttr);\n\t\treturn oldAttr;\n\t},\n\t/* returns Node */\n\tsetNamedItemNS: function(attr) {// raises: WRONG_DOCUMENT_ERR,NO_MODIFICATION_ALLOWED_ERR,INUSE_ATTRIBUTE_ERR\n\t\tvar el = attr.ownerElement, oldAttr;\n\t\tif(el && el!=this._ownerElement){\n\t\t\tthrow new DOMException(INUSE_ATTRIBUTE_ERR);\n\t\t}\n\t\toldAttr = this.getNamedItemNS(attr.namespaceURI,attr.localName);\n\t\t_addNamedNode(this._ownerElement,this,attr,oldAttr);\n\t\treturn oldAttr;\n\t},\n\n\t/* returns Node */\n\tremoveNamedItem: function(key) {\n\t\tvar attr = this.getNamedItem(key);\n\t\t_removeNamedNode(this._ownerElement,this,attr);\n\t\treturn attr;\n\n\n\t},// raises: NOT_FOUND_ERR,NO_MODIFICATION_ALLOWED_ERR\n\n\t//for level2\n\tremoveNamedItemNS:function(namespaceURI,localName){\n\t\tvar attr = this.getNamedItemNS(namespaceURI,localName);\n\t\t_removeNamedNode(this._ownerElement,this,attr);\n\t\treturn attr;\n\t},\n\tgetNamedItemNS: function(namespaceURI, localName) {\n\t\tvar i = this.length;\n\t\twhile(i--){\n\t\t\tvar node = this[i];\n\t\t\tif(node.localName == localName && node.namespaceURI == namespaceURI){\n\t\t\t\treturn node;\n\t\t\t}\n\t\t}\n\t\treturn null;\n\t}\n};\n\n/**\n * The DOMImplementation interface represents an object providing methods\n * which are not dependent on any particular document.\n * Such an object is returned by the `Document.implementation` property.\n *\n * __The individual methods describe the differences compared to the specs.__\n *\n * @constructor\n *\n * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMImplementation MDN\n * @see https://www.w3.org/TR/REC-DOM-Level-1/level-one-core.html#ID-102161490 DOM Level 1 Core (Initial)\n * @see https://www.w3.org/TR/DOM-Level-2-Core/core.html#ID-102161490 DOM Level 2 Core\n * @see https://www.w3.org/TR/DOM-Level-3-Core/core.html#ID-102161490 DOM Level 3 Core\n * @see https://dom.spec.whatwg.org/#domimplementation DOM Living Standard\n */\nfunction DOMImplementation() {\n}\n\nDOMImplementation.prototype = {\n\t/**\n\t * The DOMImplementation.hasFeature() method returns a Boolean flag indicating if a given feature is supported.\n\t * The different implementations fairly diverged in what kind of features were reported.\n\t * The latest version of the spec settled to force this method to always return true, where the functionality was accurate and in use.\n\t *\n\t * @deprecated It is deprecated and modern browsers return true in all cases.\n\t *\n\t * @param {string} feature\n\t * @param {string} [version]\n\t * @returns {boolean} always true\n\t *\n\t * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMImplementation/hasFeature MDN\n\t * @see https://www.w3.org/TR/REC-DOM-Level-1/level-one-core.html#ID-5CED94D7 DOM Level 1 Core\n\t * @see https://dom.spec.whatwg.org/#dom-domimplementation-hasfeature DOM Living Standard\n\t */\n\thasFeature: 
function(feature, version) {\n\t\t\treturn true;\n\t},\n\t/**\n\t * Creates an XML Document object of the specified type with its document element.\n\t *\n\t * __It behaves slightly different from the description in the living standard__:\n\t * - There is no interface/class `XMLDocument`, it returns a `Document` instance.\n\t * - `contentType`, `encoding`, `mode`, `origin`, `url` fields are currently not declared.\n\t * - this implementation is not validating names or qualified names\n\t * (when parsing XML strings, the SAX parser takes care of that)\n\t *\n\t * @param {string|null} namespaceURI\n\t * @param {string} qualifiedName\n\t * @param {DocumentType=null} doctype\n\t * @returns {Document}\n\t *\n\t * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMImplementation/createDocument MDN\n\t * @see https://www.w3.org/TR/DOM-Level-2-Core/core.html#Level-2-Core-DOM-createDocument DOM Level 2 Core (initial)\n\t * @see https://dom.spec.whatwg.org/#dom-domimplementation-createdocument DOM Level 2 Core\n\t *\n\t * @see https://dom.spec.whatwg.org/#validate-and-extract DOM: Validate and extract\n\t * @see https://www.w3.org/TR/xml/#NT-NameStartChar XML Spec: Names\n\t * @see https://www.w3.org/TR/xml-names/#ns-qualnames XML Namespaces: Qualified names\n\t */\n\tcreateDocument: function(namespaceURI, qualifiedName, doctype){\n\t\tvar doc = new Document();\n\t\tdoc.implementation = this;\n\t\tdoc.childNodes = new NodeList();\n\t\tdoc.doctype = doctype || null;\n\t\tif (doctype){\n\t\t\tdoc.appendChild(doctype);\n\t\t}\n\t\tif (qualifiedName){\n\t\t\tvar root = doc.createElementNS(namespaceURI, qualifiedName);\n\t\t\tdoc.appendChild(root);\n\t\t}\n\t\treturn doc;\n\t},\n\t/**\n\t * Returns a doctype, with the given `qualifiedName`, `publicId`, and `systemId`.\n\t *\n\t * __This behavior is slightly different from the in the specs__:\n\t * - this implementation is not validating names or qualified names\n\t * (when parsing XML strings, the SAX parser takes care of that)\n\t *\n\t * @param {string} qualifiedName\n\t * @param {string} [publicId]\n\t * @param {string} [systemId]\n\t * @returns {DocumentType} which can either be used with `DOMImplementation.createDocument` upon document creation\n\t * \t\t\t\t or can be put into the document via methods like `Node.insertBefore()` or `Node.replaceChild()`\n\t *\n\t * @see https://developer.mozilla.org/en-US/docs/Web/API/DOMImplementation/createDocumentType MDN\n\t * @see https://www.w3.org/TR/DOM-Level-2-Core/core.html#Level-2-Core-DOM-createDocType DOM Level 2 Core\n\t * @see https://dom.spec.whatwg.org/#dom-domimplementation-createdocumenttype DOM Living Standard\n\t *\n\t * @see https://dom.spec.whatwg.org/#validate-and-extract DOM: Validate and extract\n\t * @see https://www.w3.org/TR/xml/#NT-NameStartChar XML Spec: Names\n\t * @see https://www.w3.org/TR/xml-names/#ns-qualnames XML Namespaces: Qualified names\n\t */\n\tcreateDocumentType: function(qualifiedName, publicId, systemId){\n\t\tvar node = new DocumentType();\n\t\tnode.name = qualifiedName;\n\t\tnode.nodeName = qualifiedName;\n\t\tnode.publicId = publicId || '';\n\t\tnode.systemId = systemId || '';\n\n\t\treturn node;\n\t}\n};\n\n\n/**\n * @see http://www.w3.org/TR/2000/REC-DOM-Level-2-Core-20001113/core.html#ID-1950641247\n */\n\nfunction Node() {\n};\n\nNode.prototype = {\n\tfirstChild : null,\n\tlastChild : null,\n\tpreviousSibling : null,\n\tnextSibling : null,\n\tattributes : null,\n\tparentNode : null,\n\tchildNodes : null,\n\townerDocument : null,\n\tnodeValue : 
null,\n\tnamespaceURI : null,\n\tprefix : null,\n\tlocalName : null,\n\t// Modified in DOM Level 2:\n\tinsertBefore:function(newChild, refChild){//raises\n\t\treturn _insertBefore(this,newChild,refChild);\n\t},\n\treplaceChild:function(newChild, oldChild){//raises\n\t\t_insertBefore(this, newChild,oldChild, assertPreReplacementValidityInDocument);\n\t\tif(oldChild){\n\t\t\tthis.removeChild(oldChild);\n\t\t}\n\t},\n\tremoveChild:function(oldChild){\n\t\treturn _removeChild(this,oldChild);\n\t},\n\tappendChild:function(newChild){\n\t\treturn this.insertBefore(newChild,null);\n\t},\n\thasChildNodes:function(){\n\t\treturn this.firstChild != null;\n\t},\n\tcloneNode:function(deep){\n\t\treturn cloneNode(this.ownerDocument||this,this,deep);\n\t},\n\t// Modified in DOM Level 2:\n\tnormalize:function(){\n\t\tvar child = this.firstChild;\n\t\twhile(child){\n\t\t\tvar next = child.nextSibling;\n\t\t\tif(next && next.nodeType == TEXT_NODE && child.nodeType == TEXT_NODE){\n\t\t\t\tthis.removeChild(next);\n\t\t\t\tchild.appendData(next.data);\n\t\t\t}else{\n\t\t\t\tchild.normalize();\n\t\t\t\tchild = next;\n\t\t\t}\n\t\t}\n\t},\n \t// Introduced in DOM Level 2:\n\tisSupported:function(feature, version){\n\t\treturn this.ownerDocument.implementation.hasFeature(feature,version);\n\t},\n // Introduced in DOM Level 2:\n hasAttributes:function(){\n \treturn this.attributes.length>0;\n },\n\t/**\n\t * Look up the prefix associated to the given namespace URI, starting from this node.\n\t * **The default namespace declarations are ignored by this method.**\n\t * See Namespace Prefix Lookup for details on the algorithm used by this method.\n\t *\n\t * _Note: The implementation seems to be incomplete when compared to the algorithm described in the specs._\n\t *\n\t * @param {string | null} namespaceURI\n\t * @returns {string | null}\n\t * @see https://www.w3.org/TR/DOM-Level-3-Core/core.html#Node3-lookupNamespacePrefix\n\t * @see https://www.w3.org/TR/DOM-Level-3-Core/namespaces-algorithms.html#lookupNamespacePrefixAlgo\n\t * @see https://dom.spec.whatwg.org/#dom-node-lookupprefix\n\t * @see https://github.com/xmldom/xmldom/issues/322\n\t */\n lookupPrefix:function(namespaceURI){\n \tvar el = this;\n \twhile(el){\n \t\tvar map = el._nsMap;\n \t\t//console.dir(map)\n \t\tif(map){\n \t\t\tfor(var n in map){\n\t\t\t\t\t\tif (Object.prototype.hasOwnProperty.call(map, n) && map[n] === namespaceURI) {\n\t\t\t\t\t\t\treturn n;\n\t\t\t\t\t\t}\n \t\t\t}\n \t\t}\n \t\tel = el.nodeType == ATTRIBUTE_NODE?el.ownerDocument : el.parentNode;\n \t}\n \treturn null;\n },\n // Introduced in DOM Level 3:\n lookupNamespaceURI:function(prefix){\n \tvar el = this;\n \twhile(el){\n \t\tvar map = el._nsMap;\n \t\t//console.dir(map)\n \t\tif(map){\n \t\t\tif(Object.prototype.hasOwnProperty.call(map, prefix)){\n \t\t\t\treturn map[prefix] ;\n \t\t\t}\n \t\t}\n \t\tel = el.nodeType == ATTRIBUTE_NODE?el.ownerDocument : el.parentNode;\n \t}\n \treturn null;\n },\n // Introduced in DOM Level 3:\n isDefaultNamespace:function(namespaceURI){\n \tvar prefix = this.lookupPrefix(namespaceURI);\n \treturn prefix == null;\n }\n};\n\n\nfunction _xmlEncoder(c){\n\treturn c == '<' && '<' ||\n c == '>' && '>' ||\n c == '&' && '&' ||\n c == '\"' && '"' ||\n '&#'+c.charCodeAt()+';'\n}\n\n\ncopy(NodeType,Node);\ncopy(NodeType,Node.prototype);\n\n/**\n * @param callback return true for continue,false for break\n * @return boolean true: break visit;\n */\nfunction _visitNode(node,callback){\n\tif(callback(node)){\n\t\treturn true;\n\t}\n\tif(node = 
node.firstChild){\n\t\tdo{\n\t\t\tif(_visitNode(node,callback)){return true}\n }while(node=node.nextSibling)\n }\n}\n\n\n\nfunction Document(){\n\tthis.ownerDocument = this;\n}\n\nfunction _onAddAttribute(doc,el,newAttr){\n\tdoc && doc._inc++;\n\tvar ns = newAttr.namespaceURI ;\n\tif(ns === NAMESPACE.XMLNS){\n\t\t//update namespace\n\t\tel._nsMap[newAttr.prefix?newAttr.localName:''] = newAttr.value\n\t}\n}\n\nfunction _onRemoveAttribute(doc,el,newAttr,remove){\n\tdoc && doc._inc++;\n\tvar ns = newAttr.namespaceURI ;\n\tif(ns === NAMESPACE.XMLNS){\n\t\t//update namespace\n\t\tdelete el._nsMap[newAttr.prefix?newAttr.localName:'']\n\t}\n}\n\n/**\n * Updates `el.childNodes`, updating the indexed items and it's `length`.\n * Passing `newChild` means it will be appended.\n * Otherwise it's assumed that an item has been removed,\n * and `el.firstNode` and it's `.nextSibling` are used\n * to walk the current list of child nodes.\n *\n * @param {Document} doc\n * @param {Node} el\n * @param {Node} [newChild]\n * @private\n */\nfunction _onUpdateChild (doc, el, newChild) {\n\tif(doc && doc._inc){\n\t\tdoc._inc++;\n\t\t//update childNodes\n\t\tvar cs = el.childNodes;\n\t\tif (newChild) {\n\t\t\tcs[cs.length++] = newChild;\n\t\t} else {\n\t\t\tvar child = el.firstChild;\n\t\t\tvar i = 0;\n\t\t\twhile (child) {\n\t\t\t\tcs[i++] = child;\n\t\t\t\tchild = child.nextSibling;\n\t\t\t}\n\t\t\tcs.length = i;\n\t\t\tdelete cs[cs.length];\n\t\t}\n\t}\n}\n\n/**\n * Removes the connections between `parentNode` and `child`\n * and any existing `child.previousSibling` or `child.nextSibling`.\n *\n * @see https://github.com/xmldom/xmldom/issues/135\n * @see https://github.com/xmldom/xmldom/issues/145\n *\n * @param {Node} parentNode\n * @param {Node} child\n * @returns {Node} the child that was removed.\n * @private\n */\nfunction _removeChild (parentNode, child) {\n\tvar previous = child.previousSibling;\n\tvar next = child.nextSibling;\n\tif (previous) {\n\t\tprevious.nextSibling = next;\n\t} else {\n\t\tparentNode.firstChild = next;\n\t}\n\tif (next) {\n\t\tnext.previousSibling = previous;\n\t} else {\n\t\tparentNode.lastChild = previous;\n\t}\n\tchild.parentNode = null;\n\tchild.previousSibling = null;\n\tchild.nextSibling = null;\n\t_onUpdateChild(parentNode.ownerDocument, parentNode);\n\treturn child;\n}\n\n/**\n * Returns `true` if `node` can be a parent for insertion.\n * @param {Node} node\n * @returns {boolean}\n */\nfunction hasValidParentNodeType(node) {\n\treturn (\n\t\tnode &&\n\t\t(node.nodeType === Node.DOCUMENT_NODE || node.nodeType === Node.DOCUMENT_FRAGMENT_NODE || node.nodeType === Node.ELEMENT_NODE)\n\t);\n}\n\n/**\n * Returns `true` if `node` can be inserted according to it's `nodeType`.\n * @param {Node} node\n * @returns {boolean}\n */\nfunction hasInsertableNodeType(node) {\n\treturn (\n\t\tnode &&\n\t\t(isElementNode(node) ||\n\t\t\tisTextNode(node) ||\n\t\t\tisDocTypeNode(node) ||\n\t\t\tnode.nodeType === Node.DOCUMENT_FRAGMENT_NODE ||\n\t\t\tnode.nodeType === Node.COMMENT_NODE ||\n\t\t\tnode.nodeType === Node.PROCESSING_INSTRUCTION_NODE)\n\t);\n}\n\n/**\n * Returns true if `node` is a DOCTYPE node\n * @param {Node} node\n * @returns {boolean}\n */\nfunction isDocTypeNode(node) {\n\treturn node && node.nodeType === Node.DOCUMENT_TYPE_NODE;\n}\n\n/**\n * Returns true if the node is an element\n * @param {Node} node\n * @returns {boolean}\n */\nfunction isElementNode(node) {\n\treturn node && node.nodeType === Node.ELEMENT_NODE;\n}\n/**\n * Returns true if `node` is a text node\n * @param 
{Node} node\n * @returns {boolean}\n */\nfunction isTextNode(node) {\n\treturn node && node.nodeType === Node.TEXT_NODE;\n}\n\n/**\n * Check if en element node can be inserted before `child`, or at the end if child is falsy,\n * according to the presence and position of a doctype node on the same level.\n *\n * @param {Document} doc The document node\n * @param {Node} child the node that would become the nextSibling if the element would be inserted\n * @returns {boolean} `true` if an element can be inserted before child\n * @private\n * https://dom.spec.whatwg.org/#concept-node-ensure-pre-insertion-validity\n */\nfunction isElementInsertionPossible(doc, child) {\n\tvar parentChildNodes = doc.childNodes || [];\n\tif (find(parentChildNodes, isElementNode) || isDocTypeNode(child)) {\n\t\treturn false;\n\t}\n\tvar docTypeNode = find(parentChildNodes, isDocTypeNode);\n\treturn !(child && docTypeNode && parentChildNodes.indexOf(docTypeNode) > parentChildNodes.indexOf(child));\n}\n\n/**\n * Check if en element node can be inserted before `child`, or at the end if child is falsy,\n * according to the presence and position of a doctype node on the same level.\n *\n * @param {Node} doc The document node\n * @param {Node} child the node that would become the nextSibling if the element would be inserted\n * @returns {boolean} `true` if an element can be inserted before child\n * @private\n * https://dom.spec.whatwg.org/#concept-node-ensure-pre-insertion-validity\n */\nfunction isElementReplacementPossible(doc, child) {\n\tvar parentChildNodes = doc.childNodes || [];\n\n\tfunction hasElementChildThatIsNotChild(node) {\n\t\treturn isElementNode(node) && node !== child;\n\t}\n\n\tif (find(parentChildNodes, hasElementChildThatIsNotChild)) {\n\t\treturn false;\n\t}\n\tvar docTypeNode = find(parentChildNodes, isDocTypeNode);\n\treturn !(child && docTypeNode && parentChildNodes.indexOf(docTypeNode) > parentChildNodes.indexOf(child));\n}\n\n/**\n * @private\n * Steps 1-5 of the checks before inserting and before replacing a child are the same.\n *\n * @param {Node} parent the parent node to insert `node` into\n * @param {Node} node the node to insert\n * @param {Node=} child the node that should become the `nextSibling` of `node`\n * @returns {Node}\n * @throws DOMException for several node combinations that would create a DOM that is not well-formed.\n * @throws DOMException if `child` is provided but is not a child of `parent`.\n * @see https://dom.spec.whatwg.org/#concept-node-ensure-pre-insertion-validity\n * @see https://dom.spec.whatwg.org/#concept-node-replace\n */\nfunction assertPreInsertionValidity1to5(parent, node, child) {\n\t// 1. If `parent` is not a Document, DocumentFragment, or Element node, then throw a \"HierarchyRequestError\" DOMException.\n\tif (!hasValidParentNodeType(parent)) {\n\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Unexpected parent node type ' + parent.nodeType);\n\t}\n\t// 2. If `node` is a host-including inclusive ancestor of `parent`, then throw a \"HierarchyRequestError\" DOMException.\n\t// not implemented!\n\t// 3. If `child` is non-null and its parent is not `parent`, then throw a \"NotFoundError\" DOMException.\n\tif (child && child.parentNode !== parent) {\n\t\tthrow new DOMException(NOT_FOUND_ERR, 'child not in parent');\n\t}\n\tif (\n\t\t// 4. If `node` is not a DocumentFragment, DocumentType, Element, or CharacterData node, then throw a \"HierarchyRequestError\" DOMException.\n\t\t!hasInsertableNodeType(node) ||\n\t\t// 5. 
If either `node` is a Text node and `parent` is a document,\n\t\t// the sax parser currently adds top level text nodes, this will be fixed in 0.9.0\n\t\t// || (node.nodeType === Node.TEXT_NODE && parent.nodeType === Node.DOCUMENT_NODE)\n\t\t// or `node` is a doctype and `parent` is not a document, then throw a \"HierarchyRequestError\" DOMException.\n\t\t(isDocTypeNode(node) && parent.nodeType !== Node.DOCUMENT_NODE)\n\t) {\n\t\tthrow new DOMException(\n\t\t\tHIERARCHY_REQUEST_ERR,\n\t\t\t'Unexpected node type ' + node.nodeType + ' for parent node type ' + parent.nodeType\n\t\t);\n\t}\n}\n\n/**\n * @private\n * Step 6 of the checks before inserting and before replacing a child are different.\n *\n * @param {Document} parent the parent node to insert `node` into\n * @param {Node} node the node to insert\n * @param {Node | undefined} child the node that should become the `nextSibling` of `node`\n * @returns {Node}\n * @throws DOMException for several node combinations that would create a DOM that is not well-formed.\n * @throws DOMException if `child` is provided but is not a child of `parent`.\n * @see https://dom.spec.whatwg.org/#concept-node-ensure-pre-insertion-validity\n * @see https://dom.spec.whatwg.org/#concept-node-replace\n */\nfunction assertPreInsertionValidityInDocument(parent, node, child) {\n\tvar parentChildNodes = parent.childNodes || [];\n\tvar nodeChildNodes = node.childNodes || [];\n\n\t// DocumentFragment\n\tif (node.nodeType === Node.DOCUMENT_FRAGMENT_NODE) {\n\t\tvar nodeChildElements = nodeChildNodes.filter(isElementNode);\n\t\t// If node has more than one element child or has a Text node child.\n\t\tif (nodeChildElements.length > 1 || find(nodeChildNodes, isTextNode)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'More than one element or text in fragment');\n\t\t}\n\t\t// Otherwise, if `node` has one element child and either `parent` has an element child,\n\t\t// `child` is a doctype, or `child` is non-null and a doctype is following `child`.\n\t\tif (nodeChildElements.length === 1 && !isElementInsertionPossible(parent, child)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Element in fragment can not be inserted before doctype');\n\t\t}\n\t}\n\t// Element\n\tif (isElementNode(node)) {\n\t\t// `parent` has an element child, `child` is a doctype,\n\t\t// or `child` is non-null and a doctype is following `child`.\n\t\tif (!isElementInsertionPossible(parent, child)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Only one element can be added and only after doctype');\n\t\t}\n\t}\n\t// DocumentType\n\tif (isDocTypeNode(node)) {\n\t\t// `parent` has a doctype child,\n\t\tif (find(parentChildNodes, isDocTypeNode)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Only one doctype is allowed');\n\t\t}\n\t\tvar parentElementChild = find(parentChildNodes, isElementNode);\n\t\t// `child` is non-null and an element is preceding `child`,\n\t\tif (child && parentChildNodes.indexOf(parentElementChild) < parentChildNodes.indexOf(child)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Doctype can only be inserted before an element');\n\t\t}\n\t\t// or `child` is null and `parent` has an element child.\n\t\tif (!child && parentElementChild) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Doctype can not be appended since element is present');\n\t\t}\n\t}\n}\n\n/**\n * @private\n * Step 6 of the checks before inserting and before replacing a child are different.\n *\n * @param {Document} parent the parent node to insert `node` into\n * 
@param {Node} node the node to insert\n * @param {Node | undefined} child the node that should become the `nextSibling` of `node`\n * @returns {Node}\n * @throws DOMException for several node combinations that would create a DOM that is not well-formed.\n * @throws DOMException if `child` is provided but is not a child of `parent`.\n * @see https://dom.spec.whatwg.org/#concept-node-ensure-pre-insertion-validity\n * @see https://dom.spec.whatwg.org/#concept-node-replace\n */\nfunction assertPreReplacementValidityInDocument(parent, node, child) {\n\tvar parentChildNodes = parent.childNodes || [];\n\tvar nodeChildNodes = node.childNodes || [];\n\n\t// DocumentFragment\n\tif (node.nodeType === Node.DOCUMENT_FRAGMENT_NODE) {\n\t\tvar nodeChildElements = nodeChildNodes.filter(isElementNode);\n\t\t// If `node` has more than one element child or has a Text node child.\n\t\tif (nodeChildElements.length > 1 || find(nodeChildNodes, isTextNode)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'More than one element or text in fragment');\n\t\t}\n\t\t// Otherwise, if `node` has one element child and either `parent` has an element child that is not `child` or a doctype is following `child`.\n\t\tif (nodeChildElements.length === 1 && !isElementReplacementPossible(parent, child)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Element in fragment can not be inserted before doctype');\n\t\t}\n\t}\n\t// Element\n\tif (isElementNode(node)) {\n\t\t// `parent` has an element child that is not `child` or a doctype is following `child`.\n\t\tif (!isElementReplacementPossible(parent, child)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Only one element can be added and only after doctype');\n\t\t}\n\t}\n\t// DocumentType\n\tif (isDocTypeNode(node)) {\n\t\tfunction hasDoctypeChildThatIsNotChild(node) {\n\t\t\treturn isDocTypeNode(node) && node !== child;\n\t\t}\n\n\t\t// `parent` has a doctype child that is not `child`,\n\t\tif (find(parentChildNodes, hasDoctypeChildThatIsNotChild)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Only one doctype is allowed');\n\t\t}\n\t\tvar parentElementChild = find(parentChildNodes, isElementNode);\n\t\t// or an element is preceding `child`.\n\t\tif (child && parentChildNodes.indexOf(parentElementChild) < parentChildNodes.indexOf(child)) {\n\t\t\tthrow new DOMException(HIERARCHY_REQUEST_ERR, 'Doctype can only be inserted before an element');\n\t\t}\n\t}\n}\n\n/**\n * @private\n * @param {Node} parent the parent node to insert `node` into\n * @param {Node} node the node to insert\n * @param {Node=} child the node that should become the `nextSibling` of `node`\n * @returns {Node}\n * @throws DOMException for several node combinations that would create a DOM that is not well-formed.\n * @throws DOMException if `child` is provided but is not a child of `parent`.\n * @see https://dom.spec.whatwg.org/#concept-node-ensure-pre-insertion-validity\n */\nfunction _insertBefore(parent, node, child, _inDocumentAssertion) {\n\t// To ensure pre-insertion validity of a node into a parent before a child, run these steps:\n\tassertPreInsertionValidity1to5(parent, node, child);\n\n\t// If parent is a document, and any of the statements below, switched on the interface node implements,\n\t// are true, then throw a \"HierarchyRequestError\" DOMException.\n\tif (parent.nodeType === Node.DOCUMENT_NODE) {\n\t\t(_inDocumentAssertion || assertPreInsertionValidityInDocument)(parent, node, child);\n\t}\n\n\tvar cp = 
node.parentNode;\n\tif(cp){\n\t\tcp.removeChild(node);//remove and update\n\t}\n\tif(node.nodeType === DOCUMENT_FRAGMENT_NODE){\n\t\tvar newFirst = node.firstChild;\n\t\tif (newFirst == null) {\n\t\t\treturn node;\n\t\t}\n\t\tvar newLast = node.lastChild;\n\t}else{\n\t\tnewFirst = newLast = node;\n\t}\n\tvar pre = child ? child.previousSibling : parent.lastChild;\n\n\tnewFirst.previousSibling = pre;\n\tnewLast.nextSibling = child;\n\n\n\tif(pre){\n\t\tpre.nextSibling = newFirst;\n\t}else{\n\t\tparent.firstChild = newFirst;\n\t}\n\tif(child == null){\n\t\tparent.lastChild = newLast;\n\t}else{\n\t\tchild.previousSibling = newLast;\n\t}\n\tdo{\n\t\tnewFirst.parentNode = parent;\n\t}while(newFirst !== newLast && (newFirst= newFirst.nextSibling))\n\t_onUpdateChild(parent.ownerDocument||parent, parent);\n\t//console.log(parent.lastChild.nextSibling == null)\n\tif (node.nodeType == DOCUMENT_FRAGMENT_NODE) {\n\t\tnode.firstChild = node.lastChild = null;\n\t}\n\treturn node;\n}\n\n/**\n * Appends `newChild` to `parentNode`.\n * If `newChild` is already connected to a `parentNode` it is first removed from it.\n *\n * @see https://github.com/xmldom/xmldom/issues/135\n * @see https://github.com/xmldom/xmldom/issues/145\n * @param {Node} parentNode\n * @param {Node} newChild\n * @returns {Node}\n * @private\n */\nfunction _appendSingleChild (parentNode, newChild) {\n\tif (newChild.parentNode) {\n\t\tnewChild.parentNode.removeChild(newChild);\n\t}\n\tnewChild.parentNode = parentNode;\n\tnewChild.previousSibling = parentNode.lastChild;\n\tnewChild.nextSibling = null;\n\tif (newChild.previousSibling) {\n\t\tnewChild.previousSibling.nextSibling = newChild;\n\t} else {\n\t\tparentNode.firstChild = newChild;\n\t}\n\tparentNode.lastChild = newChild;\n\t_onUpdateChild(parentNode.ownerDocument, parentNode, newChild);\n\treturn newChild;\n}\n\nDocument.prototype = {\n\t//implementation : null,\n\tnodeName : '#document',\n\tnodeType : DOCUMENT_NODE,\n\t/**\n\t * The DocumentType node of the document.\n\t *\n\t * @readonly\n\t * @type DocumentType\n\t */\n\tdoctype : null,\n\tdocumentElement : null,\n\t_inc : 1,\n\n\tinsertBefore : function(newChild, refChild){//raises\n\t\tif(newChild.nodeType == DOCUMENT_FRAGMENT_NODE){\n\t\t\tvar child = newChild.firstChild;\n\t\t\twhile(child){\n\t\t\t\tvar next = child.nextSibling;\n\t\t\t\tthis.insertBefore(child,refChild);\n\t\t\t\tchild = next;\n\t\t\t}\n\t\t\treturn newChild;\n\t\t}\n\t\t_insertBefore(this, newChild, refChild);\n\t\tnewChild.ownerDocument = this;\n\t\tif (this.documentElement === null && newChild.nodeType === ELEMENT_NODE) {\n\t\t\tthis.documentElement = newChild;\n\t\t}\n\n\t\treturn newChild;\n\t},\n\tremoveChild : function(oldChild){\n\t\tif(this.documentElement == oldChild){\n\t\t\tthis.documentElement = null;\n\t\t}\n\t\treturn _removeChild(this,oldChild);\n\t},\n\treplaceChild: function (newChild, oldChild) {\n\t\t//raises\n\t\t_insertBefore(this, newChild, oldChild, assertPreReplacementValidityInDocument);\n\t\tnewChild.ownerDocument = this;\n\t\tif (oldChild) {\n\t\t\tthis.removeChild(oldChild);\n\t\t}\n\t\tif (isElementNode(newChild)) {\n\t\t\tthis.documentElement = newChild;\n\t\t}\n\t},\n\t// Introduced in DOM Level 2:\n\timportNode : function(importedNode,deep){\n\t\treturn importNode(this,importedNode,deep);\n\t},\n\t// Introduced in DOM Level 2:\n\tgetElementById :\tfunction(id){\n\t\tvar rtv = null;\n\t\t_visitNode(this.documentElement,function(node){\n\t\t\tif(node.nodeType == ELEMENT_NODE){\n\t\t\t\tif(node.getAttribute('id') == 
id){\n\t\t\t\t\trtv = node;\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\treturn rtv;\n\t},\n\n\t/**\n\t * The `getElementsByClassName` method of `Document` interface returns an array-like object\n\t * of all child elements which have **all** of the given class name(s).\n\t *\n\t * Returns an empty list if `classeNames` is an empty string or only contains HTML white space characters.\n\t *\n\t *\n\t * Warning: This is a live LiveNodeList.\n\t * Changes in the DOM will reflect in the array as the changes occur.\n\t * If an element selected by this array no longer qualifies for the selector,\n\t * it will automatically be removed. Be aware of this for iteration purposes.\n\t *\n\t * @param {string} classNames is a string representing the class name(s) to match; multiple class names are separated by (ASCII-)whitespace\n\t *\n\t * @see https://developer.mozilla.org/en-US/docs/Web/API/Document/getElementsByClassName\n\t * @see https://dom.spec.whatwg.org/#concept-getelementsbyclassname\n\t */\n\tgetElementsByClassName: function(classNames) {\n\t\tvar classNamesSet = toOrderedSet(classNames)\n\t\treturn new LiveNodeList(this, function(base) {\n\t\t\tvar ls = [];\n\t\t\tif (classNamesSet.length > 0) {\n\t\t\t\t_visitNode(base.documentElement, function(node) {\n\t\t\t\t\tif(node !== base && node.nodeType === ELEMENT_NODE) {\n\t\t\t\t\t\tvar nodeClassNames = node.getAttribute('class')\n\t\t\t\t\t\t// can be null if the attribute does not exist\n\t\t\t\t\t\tif (nodeClassNames) {\n\t\t\t\t\t\t\t// before splitting and iterating just compare them for the most common case\n\t\t\t\t\t\t\tvar matches = classNames === nodeClassNames;\n\t\t\t\t\t\t\tif (!matches) {\n\t\t\t\t\t\t\t\tvar nodeClassNamesSet = toOrderedSet(nodeClassNames)\n\t\t\t\t\t\t\t\tmatches = classNamesSet.every(arrayIncludes(nodeClassNamesSet))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif(matches) {\n\t\t\t\t\t\t\t\tls.push(node);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t\treturn ls;\n\t\t});\n\t},\n\n\t//document factory method:\n\tcreateElement :\tfunction(tagName){\n\t\tvar node = new Element();\n\t\tnode.ownerDocument = this;\n\t\tnode.nodeName = tagName;\n\t\tnode.tagName = tagName;\n\t\tnode.localName = tagName;\n\t\tnode.childNodes = new NodeList();\n\t\tvar attrs\t= node.attributes = new NamedNodeMap();\n\t\tattrs._ownerElement = node;\n\t\treturn node;\n\t},\n\tcreateDocumentFragment :\tfunction(){\n\t\tvar node = new DocumentFragment();\n\t\tnode.ownerDocument = this;\n\t\tnode.childNodes = new NodeList();\n\t\treturn node;\n\t},\n\tcreateTextNode :\tfunction(data){\n\t\tvar node = new Text();\n\t\tnode.ownerDocument = this;\n\t\tnode.appendData(data)\n\t\treturn node;\n\t},\n\tcreateComment :\tfunction(data){\n\t\tvar node = new Comment();\n\t\tnode.ownerDocument = this;\n\t\tnode.appendData(data)\n\t\treturn node;\n\t},\n\tcreateCDATASection :\tfunction(data){\n\t\tvar node = new CDATASection();\n\t\tnode.ownerDocument = this;\n\t\tnode.appendData(data)\n\t\treturn node;\n\t},\n\tcreateProcessingInstruction :\tfunction(target,data){\n\t\tvar node = new ProcessingInstruction();\n\t\tnode.ownerDocument = this;\n\t\tnode.tagName = node.nodeName = node.target = target;\n\t\tnode.nodeValue = node.data = data;\n\t\treturn node;\n\t},\n\tcreateAttribute :\tfunction(name){\n\t\tvar node = new Attr();\n\t\tnode.ownerDocument\t= this;\n\t\tnode.name = name;\n\t\tnode.nodeName\t= name;\n\t\tnode.localName = name;\n\t\tnode.specified = true;\n\t\treturn node;\n\t},\n\tcreateEntityReference 
:\tfunction(name){\n\t\tvar node = new EntityReference();\n\t\tnode.ownerDocument\t= this;\n\t\tnode.nodeName\t= name;\n\t\treturn node;\n\t},\n\t// Introduced in DOM Level 2:\n\tcreateElementNS :\tfunction(namespaceURI,qualifiedName){\n\t\tvar node = new Element();\n\t\tvar pl = qualifiedName.split(':');\n\t\tvar attrs\t= node.attributes = new NamedNodeMap();\n\t\tnode.childNodes = new NodeList();\n\t\tnode.ownerDocument = this;\n\t\tnode.nodeName = qualifiedName;\n\t\tnode.tagName = qualifiedName;\n\t\tnode.namespaceURI = namespaceURI;\n\t\tif(pl.length == 2){\n\t\t\tnode.prefix = pl[0];\n\t\t\tnode.localName = pl[1];\n\t\t}else{\n\t\t\t//el.prefix = null;\n\t\t\tnode.localName = qualifiedName;\n\t\t}\n\t\tattrs._ownerElement = node;\n\t\treturn node;\n\t},\n\t// Introduced in DOM Level 2:\n\tcreateAttributeNS :\tfunction(namespaceURI,qualifiedName){\n\t\tvar node = new Attr();\n\t\tvar pl = qualifiedName.split(':');\n\t\tnode.ownerDocument = this;\n\t\tnode.nodeName = qualifiedName;\n\t\tnode.name = qualifiedName;\n\t\tnode.namespaceURI = namespaceURI;\n\t\tnode.specified = true;\n\t\tif(pl.length == 2){\n\t\t\tnode.prefix = pl[0];\n\t\t\tnode.localName = pl[1];\n\t\t}else{\n\t\t\t//el.prefix = null;\n\t\t\tnode.localName = qualifiedName;\n\t\t}\n\t\treturn node;\n\t}\n};\n_extends(Document,Node);\n\n\nfunction Element() {\n\tthis._nsMap = {};\n};\nElement.prototype = {\n\tnodeType : ELEMENT_NODE,\n\thasAttribute : function(name){\n\t\treturn this.getAttributeNode(name)!=null;\n\t},\n\tgetAttribute : function(name){\n\t\tvar attr = this.getAttributeNode(name);\n\t\treturn attr && attr.value || '';\n\t},\n\tgetAttributeNode : function(name){\n\t\treturn this.attributes.getNamedItem(name);\n\t},\n\tsetAttribute : function(name, value){\n\t\tvar attr = this.ownerDocument.createAttribute(name);\n\t\tattr.value = attr.nodeValue = \"\" + value;\n\t\tthis.setAttributeNode(attr)\n\t},\n\tremoveAttribute : function(name){\n\t\tvar attr = this.getAttributeNode(name)\n\t\tattr && this.removeAttributeNode(attr);\n\t},\n\n\t//four real opeartion method\n\tappendChild:function(newChild){\n\t\tif(newChild.nodeType === DOCUMENT_FRAGMENT_NODE){\n\t\t\treturn this.insertBefore(newChild,null);\n\t\t}else{\n\t\t\treturn _appendSingleChild(this,newChild);\n\t\t}\n\t},\n\tsetAttributeNode : function(newAttr){\n\t\treturn this.attributes.setNamedItem(newAttr);\n\t},\n\tsetAttributeNodeNS : function(newAttr){\n\t\treturn this.attributes.setNamedItemNS(newAttr);\n\t},\n\tremoveAttributeNode : function(oldAttr){\n\t\t//console.log(this == oldAttr.ownerElement)\n\t\treturn this.attributes.removeNamedItem(oldAttr.nodeName);\n\t},\n\t//get real attribute name,and remove it by removeAttributeNode\n\tremoveAttributeNS : function(namespaceURI, localName){\n\t\tvar old = this.getAttributeNodeNS(namespaceURI, localName);\n\t\told && this.removeAttributeNode(old);\n\t},\n\n\thasAttributeNS : function(namespaceURI, localName){\n\t\treturn this.getAttributeNodeNS(namespaceURI, localName)!=null;\n\t},\n\tgetAttributeNS : function(namespaceURI, localName){\n\t\tvar attr = this.getAttributeNodeNS(namespaceURI, localName);\n\t\treturn attr && attr.value || '';\n\t},\n\tsetAttributeNS : function(namespaceURI, qualifiedName, value){\n\t\tvar attr = this.ownerDocument.createAttributeNS(namespaceURI, qualifiedName);\n\t\tattr.value = attr.nodeValue = \"\" + value;\n\t\tthis.setAttributeNode(attr)\n\t},\n\tgetAttributeNodeNS : function(namespaceURI, localName){\n\t\treturn this.attributes.getNamedItemNS(namespaceURI, 
localName);\n\t},\n\n\tgetElementsByTagName : function(tagName){\n\t\treturn new LiveNodeList(this,function(base){\n\t\t\tvar ls = [];\n\t\t\t_visitNode(base,function(node){\n\t\t\t\tif(node !== base && node.nodeType == ELEMENT_NODE && (tagName === '*' || node.tagName == tagName)){\n\t\t\t\t\tls.push(node);\n\t\t\t\t}\n\t\t\t});\n\t\t\treturn ls;\n\t\t});\n\t},\n\tgetElementsByTagNameNS : function(namespaceURI, localName){\n\t\treturn new LiveNodeList(this,function(base){\n\t\t\tvar ls = [];\n\t\t\t_visitNode(base,function(node){\n\t\t\t\tif(node !== base && node.nodeType === ELEMENT_NODE && (namespaceURI === '*' || node.namespaceURI === namespaceURI) && (localName === '*' || node.localName == localName)){\n\t\t\t\t\tls.push(node);\n\t\t\t\t}\n\t\t\t});\n\t\t\treturn ls;\n\n\t\t});\n\t}\n};\nDocument.prototype.getElementsByTagName = Element.prototype.getElementsByTagName;\nDocument.prototype.getElementsByTagNameNS = Element.prototype.getElementsByTagNameNS;\n\n\n_extends(Element,Node);\nfunction Attr() {\n};\nAttr.prototype.nodeType = ATTRIBUTE_NODE;\n_extends(Attr,Node);\n\n\nfunction CharacterData() {\n};\nCharacterData.prototype = {\n\tdata : '',\n\tsubstringData : function(offset, count) {\n\t\treturn this.data.substring(offset, offset+count);\n\t},\n\tappendData: function(text) {\n\t\ttext = this.data+text;\n\t\tthis.nodeValue = this.data = text;\n\t\tthis.length = text.length;\n\t},\n\tinsertData: function(offset,text) {\n\t\tthis.replaceData(offset,0,text);\n\n\t},\n\tappendChild:function(newChild){\n\t\tthrow new Error(ExceptionMessage[HIERARCHY_REQUEST_ERR])\n\t},\n\tdeleteData: function(offset, count) {\n\t\tthis.replaceData(offset,count,\"\");\n\t},\n\treplaceData: function(offset, count, text) {\n\t\tvar start = this.data.substring(0,offset);\n\t\tvar end = this.data.substring(offset+count);\n\t\ttext = start + text + end;\n\t\tthis.nodeValue = this.data = text;\n\t\tthis.length = text.length;\n\t}\n}\n_extends(CharacterData,Node);\nfunction Text() {\n};\nText.prototype = {\n\tnodeName : \"#text\",\n\tnodeType : TEXT_NODE,\n\tsplitText : function(offset) {\n\t\tvar text = this.data;\n\t\tvar newText = text.substring(offset);\n\t\ttext = text.substring(0, offset);\n\t\tthis.data = this.nodeValue = text;\n\t\tthis.length = text.length;\n\t\tvar newNode = this.ownerDocument.createTextNode(newText);\n\t\tif(this.parentNode){\n\t\t\tthis.parentNode.insertBefore(newNode, this.nextSibling);\n\t\t}\n\t\treturn newNode;\n\t}\n}\n_extends(Text,CharacterData);\nfunction Comment() {\n};\nComment.prototype = {\n\tnodeName : \"#comment\",\n\tnodeType : COMMENT_NODE\n}\n_extends(Comment,CharacterData);\n\nfunction CDATASection() {\n};\nCDATASection.prototype = {\n\tnodeName : \"#cdata-section\",\n\tnodeType : CDATA_SECTION_NODE\n}\n_extends(CDATASection,CharacterData);\n\n\nfunction DocumentType() {\n};\nDocumentType.prototype.nodeType = DOCUMENT_TYPE_NODE;\n_extends(DocumentType,Node);\n\nfunction Notation() {\n};\nNotation.prototype.nodeType = NOTATION_NODE;\n_extends(Notation,Node);\n\nfunction Entity() {\n};\nEntity.prototype.nodeType = ENTITY_NODE;\n_extends(Entity,Node);\n\nfunction EntityReference() {\n};\nEntityReference.prototype.nodeType = ENTITY_REFERENCE_NODE;\n_extends(EntityReference,Node);\n\nfunction DocumentFragment() {\n};\nDocumentFragment.prototype.nodeName =\t\"#document-fragment\";\nDocumentFragment.prototype.nodeType =\tDOCUMENT_FRAGMENT_NODE;\n_extends(DocumentFragment,Node);\n\n\nfunction ProcessingInstruction() {\n}\nProcessingInstruction.prototype.nodeType = 
PROCESSING_INSTRUCTION_NODE;\n_extends(ProcessingInstruction,Node);\nfunction XMLSerializer(){}\nXMLSerializer.prototype.serializeToString = function(node,isHtml,nodeFilter){\n\treturn nodeSerializeToString.call(node,isHtml,nodeFilter);\n}\nNode.prototype.toString = nodeSerializeToString;\nfunction nodeSerializeToString(isHtml,nodeFilter){\n\tvar buf = [];\n\tvar refNode = this.nodeType == 9 && this.documentElement || this;\n\tvar prefix = refNode.prefix;\n\tvar uri = refNode.namespaceURI;\n\n\tif(uri && prefix == null){\n\t\t//console.log(prefix)\n\t\tvar prefix = refNode.lookupPrefix(uri);\n\t\tif(prefix == null){\n\t\t\t//isHTML = true;\n\t\t\tvar visibleNamespaces=[\n\t\t\t{namespace:uri,prefix:null}\n\t\t\t//{namespace:uri,prefix:''}\n\t\t\t]\n\t\t}\n\t}\n\tserializeToString(this,buf,isHtml,nodeFilter,visibleNamespaces);\n\t//console.log('###',this.nodeType,uri,prefix,buf.join(''))\n\treturn buf.join('');\n}\n\nfunction needNamespaceDefine(node, isHTML, visibleNamespaces) {\n\tvar prefix = node.prefix || '';\n\tvar uri = node.namespaceURI;\n\t// According to [Namespaces in XML 1.0](https://www.w3.org/TR/REC-xml-names/#ns-using) ,\n\t// and more specifically https://www.w3.org/TR/REC-xml-names/#nsc-NoPrefixUndecl :\n\t// > In a namespace declaration for a prefix [...], the attribute value MUST NOT be empty.\n\t// in a similar manner [Namespaces in XML 1.1](https://www.w3.org/TR/xml-names11/#ns-using)\n\t// and more specifically https://www.w3.org/TR/xml-names11/#nsc-NSDeclared :\n\t// > [...] Furthermore, the attribute value [...] must not be an empty string.\n\t// so serializing empty namespace value like xmlns:ds=\"\" would produce an invalid XML document.\n\tif (!uri) {\n\t\treturn false;\n\t}\n\tif (prefix === \"xml\" && uri === NAMESPACE.XML || uri === NAMESPACE.XMLNS) {\n\t\treturn false;\n\t}\n\n\tvar i = visibleNamespaces.length\n\twhile (i--) {\n\t\tvar ns = visibleNamespaces[i];\n\t\t// get namespace prefix\n\t\tif (ns.prefix === prefix) {\n\t\t\treturn ns.namespace !== uri;\n\t\t}\n\t}\n\treturn true;\n}\n/**\n * Well-formed constraint: No < in Attribute Values\n * > The replacement text of any entity referred to directly or indirectly\n * > in an attribute value must not contain a <.\n * @see https://www.w3.org/TR/xml11/#CleanAttrVals\n * @see https://www.w3.org/TR/xml11/#NT-AttValue\n *\n * Literal whitespace other than space that appear in attribute values\n * are serialized as their entity references, so they will be preserved.\n * (In contrast to whitespace literals in the input which are normalized to spaces)\n * @see https://www.w3.org/TR/xml11/#AVNormalize\n * @see https://w3c.github.io/DOM-Parsing/#serializing-an-element-s-attributes\n */\nfunction addSerializedAttribute(buf, qualifiedName, value) {\n\tbuf.push(' ', qualifiedName, '=\"', value.replace(/[<>&\"\\t\\n\\r]/g, _xmlEncoder), '\"')\n}\n\nfunction serializeToString(node,buf,isHTML,nodeFilter,visibleNamespaces){\n\tif (!visibleNamespaces) {\n\t\tvisibleNamespaces = [];\n\t}\n\n\tif(nodeFilter){\n\t\tnode = nodeFilter(node);\n\t\tif(node){\n\t\t\tif(typeof node == 'string'){\n\t\t\t\tbuf.push(node);\n\t\t\t\treturn;\n\t\t\t}\n\t\t}else{\n\t\t\treturn;\n\t\t}\n\t\t//buf.sort.apply(attrs, attributeSorter);\n\t}\n\n\tswitch(node.nodeType){\n\tcase ELEMENT_NODE:\n\t\tvar attrs = node.attributes;\n\t\tvar len = attrs.length;\n\t\tvar child = node.firstChild;\n\t\tvar nodeName = node.tagName;\n\n\t\tisHTML = NAMESPACE.isHTML(node.namespaceURI) || isHTML\n\n\t\tvar prefixedNodeName = nodeName\n\t\tif (!isHTML && 
!node.prefix && node.namespaceURI) {\n\t\t\tvar defaultNS\n\t\t\t// lookup current default ns from `xmlns` attribute\n\t\t\tfor (var ai = 0; ai < attrs.length; ai++) {\n\t\t\t\tif (attrs.item(ai).name === 'xmlns') {\n\t\t\t\t\tdefaultNS = attrs.item(ai).value\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!defaultNS) {\n\t\t\t\t// lookup current default ns in visibleNamespaces\n\t\t\t\tfor (var nsi = visibleNamespaces.length - 1; nsi >= 0; nsi--) {\n\t\t\t\t\tvar namespace = visibleNamespaces[nsi]\n\t\t\t\t\tif (namespace.prefix === '' && namespace.namespace === node.namespaceURI) {\n\t\t\t\t\t\tdefaultNS = namespace.namespace\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (defaultNS !== node.namespaceURI) {\n\t\t\t\tfor (var nsi = visibleNamespaces.length - 1; nsi >= 0; nsi--) {\n\t\t\t\t\tvar namespace = visibleNamespaces[nsi]\n\t\t\t\t\tif (namespace.namespace === node.namespaceURI) {\n\t\t\t\t\t\tif (namespace.prefix) {\n\t\t\t\t\t\t\tprefixedNodeName = namespace.prefix + ':' + nodeName\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tbuf.push('<', prefixedNodeName);\n\n\t\tfor(var i=0;i');\n\t\t\t//if is cdata child node\n\t\t\tif(isHTML && /^script$/i.test(nodeName)){\n\t\t\t\twhile(child){\n\t\t\t\t\tif(child.data){\n\t\t\t\t\t\tbuf.push(child.data);\n\t\t\t\t\t}else{\n\t\t\t\t\t\tserializeToString(child, buf, isHTML, nodeFilter, visibleNamespaces.slice());\n\t\t\t\t\t}\n\t\t\t\t\tchild = child.nextSibling;\n\t\t\t\t}\n\t\t\t}else\n\t\t\t{\n\t\t\t\twhile(child){\n\t\t\t\t\tserializeToString(child, buf, isHTML, nodeFilter, visibleNamespaces.slice());\n\t\t\t\t\tchild = child.nextSibling;\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.push('');\n\t\t}else{\n\t\t\tbuf.push('/>');\n\t\t}\n\t\t// remove added visible namespaces\n\t\t//visibleNamespaces.length = startVisibleNamespaces;\n\t\treturn;\n\tcase DOCUMENT_NODE:\n\tcase DOCUMENT_FRAGMENT_NODE:\n\t\tvar child = node.firstChild;\n\t\twhile(child){\n\t\t\tserializeToString(child, buf, isHTML, nodeFilter, visibleNamespaces.slice());\n\t\t\tchild = child.nextSibling;\n\t\t}\n\t\treturn;\n\tcase ATTRIBUTE_NODE:\n\t\treturn addSerializedAttribute(buf, node.name, node.value);\n\tcase TEXT_NODE:\n\t\t/**\n\t\t * The ampersand character (&) and the left angle bracket (<) must not appear in their literal form,\n\t\t * except when used as markup delimiters, or within a comment, a processing instruction, or a CDATA section.\n\t\t * If they are needed elsewhere, they must be escaped using either numeric character references or the strings\n\t\t * `&` and `<` respectively.\n\t\t * The right angle bracket (>) may be represented using the string \" > \", and must, for compatibility,\n\t\t * be escaped using either `>` or a character reference when it appears in the string `]]>` in content,\n\t\t * when that string is not marking the end of a CDATA section.\n\t\t *\n\t\t * In the content of elements, character data is any string of characters\n\t\t * which does not contain the start-delimiter of any markup\n\t\t * and does not include the CDATA-section-close delimiter, `]]>`.\n\t\t *\n\t\t * @see https://www.w3.org/TR/xml/#NT-CharData\n\t\t * @see https://w3c.github.io/DOM-Parsing/#xml-serializing-a-text-node\n\t\t */\n\t\treturn buf.push(node.data\n\t\t\t.replace(/[<&>]/g,_xmlEncoder)\n\t\t);\n\tcase CDATA_SECTION_NODE:\n\t\treturn buf.push( '');\n\tcase COMMENT_NODE:\n\t\treturn buf.push( \"\");\n\tcase DOCUMENT_TYPE_NODE:\n\t\tvar pubid = node.publicId;\n\t\tvar sysid = 
node.systemId;\n\t\tbuf.push('');\n\t\t}else if(sysid && sysid!='.'){\n\t\t\tbuf.push(' SYSTEM ', sysid, '>');\n\t\t}else{\n\t\t\tvar sub = node.internalSubset;\n\t\t\tif(sub){\n\t\t\t\tbuf.push(\" [\",sub,\"]\");\n\t\t\t}\n\t\t\tbuf.push(\">\");\n\t\t}\n\t\treturn;\n\tcase PROCESSING_INSTRUCTION_NODE:\n\t\treturn buf.push( \"\");\n\tcase ENTITY_REFERENCE_NODE:\n\t\treturn buf.push( '&',node.nodeName,';');\n\t//case ENTITY_NODE:\n\t//case NOTATION_NODE:\n\tdefault:\n\t\tbuf.push('??',node.nodeName);\n\t}\n}\nfunction importNode(doc,node,deep){\n\tvar node2;\n\tswitch (node.nodeType) {\n\tcase ELEMENT_NODE:\n\t\tnode2 = node.cloneNode(false);\n\t\tnode2.ownerDocument = doc;\n\t\t//var attrs = node2.attributes;\n\t\t//var len = attrs.length;\n\t\t//for(var i=0;i',\n\tlt: '<',\n\tquot: '\"',\n});\n\n/**\n * A map of all entities that are detected in an HTML document.\n * They contain all entries from `XML_ENTITIES`.\n *\n * @see XML_ENTITIES\n * @see DOMParser.parseFromString\n * @see DOMImplementation.prototype.createHTMLDocument\n * @see https://html.spec.whatwg.org/#named-character-references WHATWG HTML(5) Spec\n * @see https://html.spec.whatwg.org/entities.json JSON\n * @see https://www.w3.org/TR/xml-entity-names/ W3C XML Entity Names\n * @see https://www.w3.org/TR/html4/sgml/entities.html W3C HTML4/SGML\n * @see https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references#Character_entity_references_in_HTML Wikipedia (HTML)\n * @see https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references#Entities_representing_special_characters_in_XHTML Wikpedia (XHTML)\n */\nexports.HTML_ENTITIES = freeze({\n\tAacute: '\\u00C1',\n\taacute: '\\u00E1',\n\tAbreve: '\\u0102',\n\tabreve: '\\u0103',\n\tac: '\\u223E',\n\tacd: '\\u223F',\n\tacE: '\\u223E\\u0333',\n\tAcirc: '\\u00C2',\n\tacirc: '\\u00E2',\n\tacute: '\\u00B4',\n\tAcy: '\\u0410',\n\tacy: '\\u0430',\n\tAElig: '\\u00C6',\n\taelig: '\\u00E6',\n\taf: '\\u2061',\n\tAfr: '\\uD835\\uDD04',\n\tafr: '\\uD835\\uDD1E',\n\tAgrave: '\\u00C0',\n\tagrave: '\\u00E0',\n\talefsym: '\\u2135',\n\taleph: '\\u2135',\n\tAlpha: '\\u0391',\n\talpha: '\\u03B1',\n\tAmacr: '\\u0100',\n\tamacr: '\\u0101',\n\tamalg: '\\u2A3F',\n\tAMP: '\\u0026',\n\tamp: '\\u0026',\n\tAnd: '\\u2A53',\n\tand: '\\u2227',\n\tandand: '\\u2A55',\n\tandd: '\\u2A5C',\n\tandslope: '\\u2A58',\n\tandv: '\\u2A5A',\n\tang: '\\u2220',\n\tange: '\\u29A4',\n\tangle: '\\u2220',\n\tangmsd: '\\u2221',\n\tangmsdaa: '\\u29A8',\n\tangmsdab: '\\u29A9',\n\tangmsdac: '\\u29AA',\n\tangmsdad: '\\u29AB',\n\tangmsdae: '\\u29AC',\n\tangmsdaf: '\\u29AD',\n\tangmsdag: '\\u29AE',\n\tangmsdah: '\\u29AF',\n\tangrt: '\\u221F',\n\tangrtvb: '\\u22BE',\n\tangrtvbd: '\\u299D',\n\tangsph: '\\u2222',\n\tangst: '\\u00C5',\n\tangzarr: '\\u237C',\n\tAogon: '\\u0104',\n\taogon: '\\u0105',\n\tAopf: '\\uD835\\uDD38',\n\taopf: '\\uD835\\uDD52',\n\tap: '\\u2248',\n\tapacir: '\\u2A6F',\n\tapE: '\\u2A70',\n\tape: '\\u224A',\n\tapid: '\\u224B',\n\tapos: '\\u0027',\n\tApplyFunction: '\\u2061',\n\tapprox: '\\u2248',\n\tapproxeq: '\\u224A',\n\tAring: '\\u00C5',\n\taring: '\\u00E5',\n\tAscr: '\\uD835\\uDC9C',\n\tascr: '\\uD835\\uDCB6',\n\tAssign: '\\u2254',\n\tast: '\\u002A',\n\tasymp: '\\u2248',\n\tasympeq: '\\u224D',\n\tAtilde: '\\u00C3',\n\tatilde: '\\u00E3',\n\tAuml: '\\u00C4',\n\tauml: '\\u00E4',\n\tawconint: '\\u2233',\n\tawint: '\\u2A11',\n\tbackcong: '\\u224C',\n\tbackepsilon: '\\u03F6',\n\tbackprime: '\\u2035',\n\tbacksim: '\\u223D',\n\tbacksimeq: '\\u22CD',\n\tBackslash: '\\u2216',\n\tBarv: 
'\\u2AE7',\n\tbarvee: '\\u22BD',\n\tBarwed: '\\u2306',\n\tbarwed: '\\u2305',\n\tbarwedge: '\\u2305',\n\tbbrk: '\\u23B5',\n\tbbrktbrk: '\\u23B6',\n\tbcong: '\\u224C',\n\tBcy: '\\u0411',\n\tbcy: '\\u0431',\n\tbdquo: '\\u201E',\n\tbecaus: '\\u2235',\n\tBecause: '\\u2235',\n\tbecause: '\\u2235',\n\tbemptyv: '\\u29B0',\n\tbepsi: '\\u03F6',\n\tbernou: '\\u212C',\n\tBernoullis: '\\u212C',\n\tBeta: '\\u0392',\n\tbeta: '\\u03B2',\n\tbeth: '\\u2136',\n\tbetween: '\\u226C',\n\tBfr: '\\uD835\\uDD05',\n\tbfr: '\\uD835\\uDD1F',\n\tbigcap: '\\u22C2',\n\tbigcirc: '\\u25EF',\n\tbigcup: '\\u22C3',\n\tbigodot: '\\u2A00',\n\tbigoplus: '\\u2A01',\n\tbigotimes: '\\u2A02',\n\tbigsqcup: '\\u2A06',\n\tbigstar: '\\u2605',\n\tbigtriangledown: '\\u25BD',\n\tbigtriangleup: '\\u25B3',\n\tbiguplus: '\\u2A04',\n\tbigvee: '\\u22C1',\n\tbigwedge: '\\u22C0',\n\tbkarow: '\\u290D',\n\tblacklozenge: '\\u29EB',\n\tblacksquare: '\\u25AA',\n\tblacktriangle: '\\u25B4',\n\tblacktriangledown: '\\u25BE',\n\tblacktriangleleft: '\\u25C2',\n\tblacktriangleright: '\\u25B8',\n\tblank: '\\u2423',\n\tblk12: '\\u2592',\n\tblk14: '\\u2591',\n\tblk34: '\\u2593',\n\tblock: '\\u2588',\n\tbne: '\\u003D\\u20E5',\n\tbnequiv: '\\u2261\\u20E5',\n\tbNot: '\\u2AED',\n\tbnot: '\\u2310',\n\tBopf: '\\uD835\\uDD39',\n\tbopf: '\\uD835\\uDD53',\n\tbot: '\\u22A5',\n\tbottom: '\\u22A5',\n\tbowtie: '\\u22C8',\n\tboxbox: '\\u29C9',\n\tboxDL: '\\u2557',\n\tboxDl: '\\u2556',\n\tboxdL: '\\u2555',\n\tboxdl: '\\u2510',\n\tboxDR: '\\u2554',\n\tboxDr: '\\u2553',\n\tboxdR: '\\u2552',\n\tboxdr: '\\u250C',\n\tboxH: '\\u2550',\n\tboxh: '\\u2500',\n\tboxHD: '\\u2566',\n\tboxHd: '\\u2564',\n\tboxhD: '\\u2565',\n\tboxhd: '\\u252C',\n\tboxHU: '\\u2569',\n\tboxHu: '\\u2567',\n\tboxhU: '\\u2568',\n\tboxhu: '\\u2534',\n\tboxminus: '\\u229F',\n\tboxplus: '\\u229E',\n\tboxtimes: '\\u22A0',\n\tboxUL: '\\u255D',\n\tboxUl: '\\u255C',\n\tboxuL: '\\u255B',\n\tboxul: '\\u2518',\n\tboxUR: '\\u255A',\n\tboxUr: '\\u2559',\n\tboxuR: '\\u2558',\n\tboxur: '\\u2514',\n\tboxV: '\\u2551',\n\tboxv: '\\u2502',\n\tboxVH: '\\u256C',\n\tboxVh: '\\u256B',\n\tboxvH: '\\u256A',\n\tboxvh: '\\u253C',\n\tboxVL: '\\u2563',\n\tboxVl: '\\u2562',\n\tboxvL: '\\u2561',\n\tboxvl: '\\u2524',\n\tboxVR: '\\u2560',\n\tboxVr: '\\u255F',\n\tboxvR: '\\u255E',\n\tboxvr: '\\u251C',\n\tbprime: '\\u2035',\n\tBreve: '\\u02D8',\n\tbreve: '\\u02D8',\n\tbrvbar: '\\u00A6',\n\tBscr: '\\u212C',\n\tbscr: '\\uD835\\uDCB7',\n\tbsemi: '\\u204F',\n\tbsim: '\\u223D',\n\tbsime: '\\u22CD',\n\tbsol: '\\u005C',\n\tbsolb: '\\u29C5',\n\tbsolhsub: '\\u27C8',\n\tbull: '\\u2022',\n\tbullet: '\\u2022',\n\tbump: '\\u224E',\n\tbumpE: '\\u2AAE',\n\tbumpe: '\\u224F',\n\tBumpeq: '\\u224E',\n\tbumpeq: '\\u224F',\n\tCacute: '\\u0106',\n\tcacute: '\\u0107',\n\tCap: '\\u22D2',\n\tcap: '\\u2229',\n\tcapand: '\\u2A44',\n\tcapbrcup: '\\u2A49',\n\tcapcap: '\\u2A4B',\n\tcapcup: '\\u2A47',\n\tcapdot: '\\u2A40',\n\tCapitalDifferentialD: '\\u2145',\n\tcaps: '\\u2229\\uFE00',\n\tcaret: '\\u2041',\n\tcaron: '\\u02C7',\n\tCayleys: '\\u212D',\n\tccaps: '\\u2A4D',\n\tCcaron: '\\u010C',\n\tccaron: '\\u010D',\n\tCcedil: '\\u00C7',\n\tccedil: '\\u00E7',\n\tCcirc: '\\u0108',\n\tccirc: '\\u0109',\n\tCconint: '\\u2230',\n\tccups: '\\u2A4C',\n\tccupssm: '\\u2A50',\n\tCdot: '\\u010A',\n\tcdot: '\\u010B',\n\tcedil: '\\u00B8',\n\tCedilla: '\\u00B8',\n\tcemptyv: '\\u29B2',\n\tcent: '\\u00A2',\n\tCenterDot: '\\u00B7',\n\tcenterdot: '\\u00B7',\n\tCfr: '\\u212D',\n\tcfr: '\\uD835\\uDD20',\n\tCHcy: '\\u0427',\n\tchcy: '\\u0447',\n\tcheck: '\\u2713',\n\tcheckmark: '\\u2713',\n\tChi: 
'\\u03A7',\n\tchi: '\\u03C7',\n\tcir: '\\u25CB',\n\tcirc: '\\u02C6',\n\tcirceq: '\\u2257',\n\tcirclearrowleft: '\\u21BA',\n\tcirclearrowright: '\\u21BB',\n\tcircledast: '\\u229B',\n\tcircledcirc: '\\u229A',\n\tcircleddash: '\\u229D',\n\tCircleDot: '\\u2299',\n\tcircledR: '\\u00AE',\n\tcircledS: '\\u24C8',\n\tCircleMinus: '\\u2296',\n\tCirclePlus: '\\u2295',\n\tCircleTimes: '\\u2297',\n\tcirE: '\\u29C3',\n\tcire: '\\u2257',\n\tcirfnint: '\\u2A10',\n\tcirmid: '\\u2AEF',\n\tcirscir: '\\u29C2',\n\tClockwiseContourIntegral: '\\u2232',\n\tCloseCurlyDoubleQuote: '\\u201D',\n\tCloseCurlyQuote: '\\u2019',\n\tclubs: '\\u2663',\n\tclubsuit: '\\u2663',\n\tColon: '\\u2237',\n\tcolon: '\\u003A',\n\tColone: '\\u2A74',\n\tcolone: '\\u2254',\n\tcoloneq: '\\u2254',\n\tcomma: '\\u002C',\n\tcommat: '\\u0040',\n\tcomp: '\\u2201',\n\tcompfn: '\\u2218',\n\tcomplement: '\\u2201',\n\tcomplexes: '\\u2102',\n\tcong: '\\u2245',\n\tcongdot: '\\u2A6D',\n\tCongruent: '\\u2261',\n\tConint: '\\u222F',\n\tconint: '\\u222E',\n\tContourIntegral: '\\u222E',\n\tCopf: '\\u2102',\n\tcopf: '\\uD835\\uDD54',\n\tcoprod: '\\u2210',\n\tCoproduct: '\\u2210',\n\tCOPY: '\\u00A9',\n\tcopy: '\\u00A9',\n\tcopysr: '\\u2117',\n\tCounterClockwiseContourIntegral: '\\u2233',\n\tcrarr: '\\u21B5',\n\tCross: '\\u2A2F',\n\tcross: '\\u2717',\n\tCscr: '\\uD835\\uDC9E',\n\tcscr: '\\uD835\\uDCB8',\n\tcsub: '\\u2ACF',\n\tcsube: '\\u2AD1',\n\tcsup: '\\u2AD0',\n\tcsupe: '\\u2AD2',\n\tctdot: '\\u22EF',\n\tcudarrl: '\\u2938',\n\tcudarrr: '\\u2935',\n\tcuepr: '\\u22DE',\n\tcuesc: '\\u22DF',\n\tcularr: '\\u21B6',\n\tcularrp: '\\u293D',\n\tCup: '\\u22D3',\n\tcup: '\\u222A',\n\tcupbrcap: '\\u2A48',\n\tCupCap: '\\u224D',\n\tcupcap: '\\u2A46',\n\tcupcup: '\\u2A4A',\n\tcupdot: '\\u228D',\n\tcupor: '\\u2A45',\n\tcups: '\\u222A\\uFE00',\n\tcurarr: '\\u21B7',\n\tcurarrm: '\\u293C',\n\tcurlyeqprec: '\\u22DE',\n\tcurlyeqsucc: '\\u22DF',\n\tcurlyvee: '\\u22CE',\n\tcurlywedge: '\\u22CF',\n\tcurren: '\\u00A4',\n\tcurvearrowleft: '\\u21B6',\n\tcurvearrowright: '\\u21B7',\n\tcuvee: '\\u22CE',\n\tcuwed: '\\u22CF',\n\tcwconint: '\\u2232',\n\tcwint: '\\u2231',\n\tcylcty: '\\u232D',\n\tDagger: '\\u2021',\n\tdagger: '\\u2020',\n\tdaleth: '\\u2138',\n\tDarr: '\\u21A1',\n\tdArr: '\\u21D3',\n\tdarr: '\\u2193',\n\tdash: '\\u2010',\n\tDashv: '\\u2AE4',\n\tdashv: '\\u22A3',\n\tdbkarow: '\\u290F',\n\tdblac: '\\u02DD',\n\tDcaron: '\\u010E',\n\tdcaron: '\\u010F',\n\tDcy: '\\u0414',\n\tdcy: '\\u0434',\n\tDD: '\\u2145',\n\tdd: '\\u2146',\n\tddagger: '\\u2021',\n\tddarr: '\\u21CA',\n\tDDotrahd: '\\u2911',\n\tddotseq: '\\u2A77',\n\tdeg: '\\u00B0',\n\tDel: '\\u2207',\n\tDelta: '\\u0394',\n\tdelta: '\\u03B4',\n\tdemptyv: '\\u29B1',\n\tdfisht: '\\u297F',\n\tDfr: '\\uD835\\uDD07',\n\tdfr: '\\uD835\\uDD21',\n\tdHar: '\\u2965',\n\tdharl: '\\u21C3',\n\tdharr: '\\u21C2',\n\tDiacriticalAcute: '\\u00B4',\n\tDiacriticalDot: '\\u02D9',\n\tDiacriticalDoubleAcute: '\\u02DD',\n\tDiacriticalGrave: '\\u0060',\n\tDiacriticalTilde: '\\u02DC',\n\tdiam: '\\u22C4',\n\tDiamond: '\\u22C4',\n\tdiamond: '\\u22C4',\n\tdiamondsuit: '\\u2666',\n\tdiams: '\\u2666',\n\tdie: '\\u00A8',\n\tDifferentialD: '\\u2146',\n\tdigamma: '\\u03DD',\n\tdisin: '\\u22F2',\n\tdiv: '\\u00F7',\n\tdivide: '\\u00F7',\n\tdivideontimes: '\\u22C7',\n\tdivonx: '\\u22C7',\n\tDJcy: '\\u0402',\n\tdjcy: '\\u0452',\n\tdlcorn: '\\u231E',\n\tdlcrop: '\\u230D',\n\tdollar: '\\u0024',\n\tDopf: '\\uD835\\uDD3B',\n\tdopf: '\\uD835\\uDD55',\n\tDot: '\\u00A8',\n\tdot: '\\u02D9',\n\tDotDot: '\\u20DC',\n\tdoteq: '\\u2250',\n\tdoteqdot: '\\u2251',\n\tDotEqual: 
'\\u2250',\n\tdotminus: '\\u2238',\n\tdotplus: '\\u2214',\n\tdotsquare: '\\u22A1',\n\tdoublebarwedge: '\\u2306',\n\tDoubleContourIntegral: '\\u222F',\n\tDoubleDot: '\\u00A8',\n\tDoubleDownArrow: '\\u21D3',\n\tDoubleLeftArrow: '\\u21D0',\n\tDoubleLeftRightArrow: '\\u21D4',\n\tDoubleLeftTee: '\\u2AE4',\n\tDoubleLongLeftArrow: '\\u27F8',\n\tDoubleLongLeftRightArrow: '\\u27FA',\n\tDoubleLongRightArrow: '\\u27F9',\n\tDoubleRightArrow: '\\u21D2',\n\tDoubleRightTee: '\\u22A8',\n\tDoubleUpArrow: '\\u21D1',\n\tDoubleUpDownArrow: '\\u21D5',\n\tDoubleVerticalBar: '\\u2225',\n\tDownArrow: '\\u2193',\n\tDownarrow: '\\u21D3',\n\tdownarrow: '\\u2193',\n\tDownArrowBar: '\\u2913',\n\tDownArrowUpArrow: '\\u21F5',\n\tDownBreve: '\\u0311',\n\tdowndownarrows: '\\u21CA',\n\tdownharpoonleft: '\\u21C3',\n\tdownharpoonright: '\\u21C2',\n\tDownLeftRightVector: '\\u2950',\n\tDownLeftTeeVector: '\\u295E',\n\tDownLeftVector: '\\u21BD',\n\tDownLeftVectorBar: '\\u2956',\n\tDownRightTeeVector: '\\u295F',\n\tDownRightVector: '\\u21C1',\n\tDownRightVectorBar: '\\u2957',\n\tDownTee: '\\u22A4',\n\tDownTeeArrow: '\\u21A7',\n\tdrbkarow: '\\u2910',\n\tdrcorn: '\\u231F',\n\tdrcrop: '\\u230C',\n\tDscr: '\\uD835\\uDC9F',\n\tdscr: '\\uD835\\uDCB9',\n\tDScy: '\\u0405',\n\tdscy: '\\u0455',\n\tdsol: '\\u29F6',\n\tDstrok: '\\u0110',\n\tdstrok: '\\u0111',\n\tdtdot: '\\u22F1',\n\tdtri: '\\u25BF',\n\tdtrif: '\\u25BE',\n\tduarr: '\\u21F5',\n\tduhar: '\\u296F',\n\tdwangle: '\\u29A6',\n\tDZcy: '\\u040F',\n\tdzcy: '\\u045F',\n\tdzigrarr: '\\u27FF',\n\tEacute: '\\u00C9',\n\teacute: '\\u00E9',\n\teaster: '\\u2A6E',\n\tEcaron: '\\u011A',\n\tecaron: '\\u011B',\n\tecir: '\\u2256',\n\tEcirc: '\\u00CA',\n\tecirc: '\\u00EA',\n\tecolon: '\\u2255',\n\tEcy: '\\u042D',\n\tecy: '\\u044D',\n\teDDot: '\\u2A77',\n\tEdot: '\\u0116',\n\teDot: '\\u2251',\n\tedot: '\\u0117',\n\tee: '\\u2147',\n\tefDot: '\\u2252',\n\tEfr: '\\uD835\\uDD08',\n\tefr: '\\uD835\\uDD22',\n\teg: '\\u2A9A',\n\tEgrave: '\\u00C8',\n\tegrave: '\\u00E8',\n\tegs: '\\u2A96',\n\tegsdot: '\\u2A98',\n\tel: '\\u2A99',\n\tElement: '\\u2208',\n\telinters: '\\u23E7',\n\tell: '\\u2113',\n\tels: '\\u2A95',\n\telsdot: '\\u2A97',\n\tEmacr: '\\u0112',\n\temacr: '\\u0113',\n\tempty: '\\u2205',\n\temptyset: '\\u2205',\n\tEmptySmallSquare: '\\u25FB',\n\temptyv: '\\u2205',\n\tEmptyVerySmallSquare: '\\u25AB',\n\temsp: '\\u2003',\n\temsp13: '\\u2004',\n\temsp14: '\\u2005',\n\tENG: '\\u014A',\n\teng: '\\u014B',\n\tensp: '\\u2002',\n\tEogon: '\\u0118',\n\teogon: '\\u0119',\n\tEopf: '\\uD835\\uDD3C',\n\teopf: '\\uD835\\uDD56',\n\tepar: '\\u22D5',\n\teparsl: '\\u29E3',\n\teplus: '\\u2A71',\n\tepsi: '\\u03B5',\n\tEpsilon: '\\u0395',\n\tepsilon: '\\u03B5',\n\tepsiv: '\\u03F5',\n\teqcirc: '\\u2256',\n\teqcolon: '\\u2255',\n\teqsim: '\\u2242',\n\teqslantgtr: '\\u2A96',\n\teqslantless: '\\u2A95',\n\tEqual: '\\u2A75',\n\tequals: '\\u003D',\n\tEqualTilde: '\\u2242',\n\tequest: '\\u225F',\n\tEquilibrium: '\\u21CC',\n\tequiv: '\\u2261',\n\tequivDD: '\\u2A78',\n\teqvparsl: '\\u29E5',\n\terarr: '\\u2971',\n\terDot: '\\u2253',\n\tEscr: '\\u2130',\n\tescr: '\\u212F',\n\tesdot: '\\u2250',\n\tEsim: '\\u2A73',\n\tesim: '\\u2242',\n\tEta: '\\u0397',\n\teta: '\\u03B7',\n\tETH: '\\u00D0',\n\teth: '\\u00F0',\n\tEuml: '\\u00CB',\n\teuml: '\\u00EB',\n\teuro: '\\u20AC',\n\texcl: '\\u0021',\n\texist: '\\u2203',\n\tExists: '\\u2203',\n\texpectation: '\\u2130',\n\tExponentialE: '\\u2147',\n\texponentiale: '\\u2147',\n\tfallingdotseq: '\\u2252',\n\tFcy: '\\u0424',\n\tfcy: '\\u0444',\n\tfemale: '\\u2640',\n\tffilig: '\\uFB03',\n\tfflig: 
'\\uFB00',\n\tffllig: '\\uFB04',\n\tFfr: '\\uD835\\uDD09',\n\tffr: '\\uD835\\uDD23',\n\tfilig: '\\uFB01',\n\tFilledSmallSquare: '\\u25FC',\n\tFilledVerySmallSquare: '\\u25AA',\n\tfjlig: '\\u0066\\u006A',\n\tflat: '\\u266D',\n\tfllig: '\\uFB02',\n\tfltns: '\\u25B1',\n\tfnof: '\\u0192',\n\tFopf: '\\uD835\\uDD3D',\n\tfopf: '\\uD835\\uDD57',\n\tForAll: '\\u2200',\n\tforall: '\\u2200',\n\tfork: '\\u22D4',\n\tforkv: '\\u2AD9',\n\tFouriertrf: '\\u2131',\n\tfpartint: '\\u2A0D',\n\tfrac12: '\\u00BD',\n\tfrac13: '\\u2153',\n\tfrac14: '\\u00BC',\n\tfrac15: '\\u2155',\n\tfrac16: '\\u2159',\n\tfrac18: '\\u215B',\n\tfrac23: '\\u2154',\n\tfrac25: '\\u2156',\n\tfrac34: '\\u00BE',\n\tfrac35: '\\u2157',\n\tfrac38: '\\u215C',\n\tfrac45: '\\u2158',\n\tfrac56: '\\u215A',\n\tfrac58: '\\u215D',\n\tfrac78: '\\u215E',\n\tfrasl: '\\u2044',\n\tfrown: '\\u2322',\n\tFscr: '\\u2131',\n\tfscr: '\\uD835\\uDCBB',\n\tgacute: '\\u01F5',\n\tGamma: '\\u0393',\n\tgamma: '\\u03B3',\n\tGammad: '\\u03DC',\n\tgammad: '\\u03DD',\n\tgap: '\\u2A86',\n\tGbreve: '\\u011E',\n\tgbreve: '\\u011F',\n\tGcedil: '\\u0122',\n\tGcirc: '\\u011C',\n\tgcirc: '\\u011D',\n\tGcy: '\\u0413',\n\tgcy: '\\u0433',\n\tGdot: '\\u0120',\n\tgdot: '\\u0121',\n\tgE: '\\u2267',\n\tge: '\\u2265',\n\tgEl: '\\u2A8C',\n\tgel: '\\u22DB',\n\tgeq: '\\u2265',\n\tgeqq: '\\u2267',\n\tgeqslant: '\\u2A7E',\n\tges: '\\u2A7E',\n\tgescc: '\\u2AA9',\n\tgesdot: '\\u2A80',\n\tgesdoto: '\\u2A82',\n\tgesdotol: '\\u2A84',\n\tgesl: '\\u22DB\\uFE00',\n\tgesles: '\\u2A94',\n\tGfr: '\\uD835\\uDD0A',\n\tgfr: '\\uD835\\uDD24',\n\tGg: '\\u22D9',\n\tgg: '\\u226B',\n\tggg: '\\u22D9',\n\tgimel: '\\u2137',\n\tGJcy: '\\u0403',\n\tgjcy: '\\u0453',\n\tgl: '\\u2277',\n\tgla: '\\u2AA5',\n\tglE: '\\u2A92',\n\tglj: '\\u2AA4',\n\tgnap: '\\u2A8A',\n\tgnapprox: '\\u2A8A',\n\tgnE: '\\u2269',\n\tgne: '\\u2A88',\n\tgneq: '\\u2A88',\n\tgneqq: '\\u2269',\n\tgnsim: '\\u22E7',\n\tGopf: '\\uD835\\uDD3E',\n\tgopf: '\\uD835\\uDD58',\n\tgrave: '\\u0060',\n\tGreaterEqual: '\\u2265',\n\tGreaterEqualLess: '\\u22DB',\n\tGreaterFullEqual: '\\u2267',\n\tGreaterGreater: '\\u2AA2',\n\tGreaterLess: '\\u2277',\n\tGreaterSlantEqual: '\\u2A7E',\n\tGreaterTilde: '\\u2273',\n\tGscr: '\\uD835\\uDCA2',\n\tgscr: '\\u210A',\n\tgsim: '\\u2273',\n\tgsime: '\\u2A8E',\n\tgsiml: '\\u2A90',\n\tGt: '\\u226B',\n\tGT: '\\u003E',\n\tgt: '\\u003E',\n\tgtcc: '\\u2AA7',\n\tgtcir: '\\u2A7A',\n\tgtdot: '\\u22D7',\n\tgtlPar: '\\u2995',\n\tgtquest: '\\u2A7C',\n\tgtrapprox: '\\u2A86',\n\tgtrarr: '\\u2978',\n\tgtrdot: '\\u22D7',\n\tgtreqless: '\\u22DB',\n\tgtreqqless: '\\u2A8C',\n\tgtrless: '\\u2277',\n\tgtrsim: '\\u2273',\n\tgvertneqq: '\\u2269\\uFE00',\n\tgvnE: '\\u2269\\uFE00',\n\tHacek: '\\u02C7',\n\thairsp: '\\u200A',\n\thalf: '\\u00BD',\n\thamilt: '\\u210B',\n\tHARDcy: '\\u042A',\n\thardcy: '\\u044A',\n\thArr: '\\u21D4',\n\tharr: '\\u2194',\n\tharrcir: '\\u2948',\n\tharrw: '\\u21AD',\n\tHat: '\\u005E',\n\thbar: '\\u210F',\n\tHcirc: '\\u0124',\n\thcirc: '\\u0125',\n\thearts: '\\u2665',\n\theartsuit: '\\u2665',\n\thellip: '\\u2026',\n\thercon: '\\u22B9',\n\tHfr: '\\u210C',\n\thfr: '\\uD835\\uDD25',\n\tHilbertSpace: '\\u210B',\n\thksearow: '\\u2925',\n\thkswarow: '\\u2926',\n\thoarr: '\\u21FF',\n\thomtht: '\\u223B',\n\thookleftarrow: '\\u21A9',\n\thookrightarrow: '\\u21AA',\n\tHopf: '\\u210D',\n\thopf: '\\uD835\\uDD59',\n\thorbar: '\\u2015',\n\tHorizontalLine: '\\u2500',\n\tHscr: '\\u210B',\n\thscr: '\\uD835\\uDCBD',\n\thslash: '\\u210F',\n\tHstrok: '\\u0126',\n\thstrok: '\\u0127',\n\tHumpDownHump: '\\u224E',\n\tHumpEqual: '\\u224F',\n\thybull: 
'\\u2043',\n\thyphen: '\\u2010',\n\tIacute: '\\u00CD',\n\tiacute: '\\u00ED',\n\tic: '\\u2063',\n\tIcirc: '\\u00CE',\n\ticirc: '\\u00EE',\n\tIcy: '\\u0418',\n\ticy: '\\u0438',\n\tIdot: '\\u0130',\n\tIEcy: '\\u0415',\n\tiecy: '\\u0435',\n\tiexcl: '\\u00A1',\n\tiff: '\\u21D4',\n\tIfr: '\\u2111',\n\tifr: '\\uD835\\uDD26',\n\tIgrave: '\\u00CC',\n\tigrave: '\\u00EC',\n\tii: '\\u2148',\n\tiiiint: '\\u2A0C',\n\tiiint: '\\u222D',\n\tiinfin: '\\u29DC',\n\tiiota: '\\u2129',\n\tIJlig: '\\u0132',\n\tijlig: '\\u0133',\n\tIm: '\\u2111',\n\tImacr: '\\u012A',\n\timacr: '\\u012B',\n\timage: '\\u2111',\n\tImaginaryI: '\\u2148',\n\timagline: '\\u2110',\n\timagpart: '\\u2111',\n\timath: '\\u0131',\n\timof: '\\u22B7',\n\timped: '\\u01B5',\n\tImplies: '\\u21D2',\n\tin: '\\u2208',\n\tincare: '\\u2105',\n\tinfin: '\\u221E',\n\tinfintie: '\\u29DD',\n\tinodot: '\\u0131',\n\tInt: '\\u222C',\n\tint: '\\u222B',\n\tintcal: '\\u22BA',\n\tintegers: '\\u2124',\n\tIntegral: '\\u222B',\n\tintercal: '\\u22BA',\n\tIntersection: '\\u22C2',\n\tintlarhk: '\\u2A17',\n\tintprod: '\\u2A3C',\n\tInvisibleComma: '\\u2063',\n\tInvisibleTimes: '\\u2062',\n\tIOcy: '\\u0401',\n\tiocy: '\\u0451',\n\tIogon: '\\u012E',\n\tiogon: '\\u012F',\n\tIopf: '\\uD835\\uDD40',\n\tiopf: '\\uD835\\uDD5A',\n\tIota: '\\u0399',\n\tiota: '\\u03B9',\n\tiprod: '\\u2A3C',\n\tiquest: '\\u00BF',\n\tIscr: '\\u2110',\n\tiscr: '\\uD835\\uDCBE',\n\tisin: '\\u2208',\n\tisindot: '\\u22F5',\n\tisinE: '\\u22F9',\n\tisins: '\\u22F4',\n\tisinsv: '\\u22F3',\n\tisinv: '\\u2208',\n\tit: '\\u2062',\n\tItilde: '\\u0128',\n\titilde: '\\u0129',\n\tIukcy: '\\u0406',\n\tiukcy: '\\u0456',\n\tIuml: '\\u00CF',\n\tiuml: '\\u00EF',\n\tJcirc: '\\u0134',\n\tjcirc: '\\u0135',\n\tJcy: '\\u0419',\n\tjcy: '\\u0439',\n\tJfr: '\\uD835\\uDD0D',\n\tjfr: '\\uD835\\uDD27',\n\tjmath: '\\u0237',\n\tJopf: '\\uD835\\uDD41',\n\tjopf: '\\uD835\\uDD5B',\n\tJscr: '\\uD835\\uDCA5',\n\tjscr: '\\uD835\\uDCBF',\n\tJsercy: '\\u0408',\n\tjsercy: '\\u0458',\n\tJukcy: '\\u0404',\n\tjukcy: '\\u0454',\n\tKappa: '\\u039A',\n\tkappa: '\\u03BA',\n\tkappav: '\\u03F0',\n\tKcedil: '\\u0136',\n\tkcedil: '\\u0137',\n\tKcy: '\\u041A',\n\tkcy: '\\u043A',\n\tKfr: '\\uD835\\uDD0E',\n\tkfr: '\\uD835\\uDD28',\n\tkgreen: '\\u0138',\n\tKHcy: '\\u0425',\n\tkhcy: '\\u0445',\n\tKJcy: '\\u040C',\n\tkjcy: '\\u045C',\n\tKopf: '\\uD835\\uDD42',\n\tkopf: '\\uD835\\uDD5C',\n\tKscr: '\\uD835\\uDCA6',\n\tkscr: '\\uD835\\uDCC0',\n\tlAarr: '\\u21DA',\n\tLacute: '\\u0139',\n\tlacute: '\\u013A',\n\tlaemptyv: '\\u29B4',\n\tlagran: '\\u2112',\n\tLambda: '\\u039B',\n\tlambda: '\\u03BB',\n\tLang: '\\u27EA',\n\tlang: '\\u27E8',\n\tlangd: '\\u2991',\n\tlangle: '\\u27E8',\n\tlap: '\\u2A85',\n\tLaplacetrf: '\\u2112',\n\tlaquo: '\\u00AB',\n\tLarr: '\\u219E',\n\tlArr: '\\u21D0',\n\tlarr: '\\u2190',\n\tlarrb: '\\u21E4',\n\tlarrbfs: '\\u291F',\n\tlarrfs: '\\u291D',\n\tlarrhk: '\\u21A9',\n\tlarrlp: '\\u21AB',\n\tlarrpl: '\\u2939',\n\tlarrsim: '\\u2973',\n\tlarrtl: '\\u21A2',\n\tlat: '\\u2AAB',\n\tlAtail: '\\u291B',\n\tlatail: '\\u2919',\n\tlate: '\\u2AAD',\n\tlates: '\\u2AAD\\uFE00',\n\tlBarr: '\\u290E',\n\tlbarr: '\\u290C',\n\tlbbrk: '\\u2772',\n\tlbrace: '\\u007B',\n\tlbrack: '\\u005B',\n\tlbrke: '\\u298B',\n\tlbrksld: '\\u298F',\n\tlbrkslu: '\\u298D',\n\tLcaron: '\\u013D',\n\tlcaron: '\\u013E',\n\tLcedil: '\\u013B',\n\tlcedil: '\\u013C',\n\tlceil: '\\u2308',\n\tlcub: '\\u007B',\n\tLcy: '\\u041B',\n\tlcy: '\\u043B',\n\tldca: '\\u2936',\n\tldquo: '\\u201C',\n\tldquor: '\\u201E',\n\tldrdhar: '\\u2967',\n\tldrushar: '\\u294B',\n\tldsh: '\\u21B2',\n\tlE: 
'\\u2266',\n\tle: '\\u2264',\n\tLeftAngleBracket: '\\u27E8',\n\tLeftArrow: '\\u2190',\n\tLeftarrow: '\\u21D0',\n\tleftarrow: '\\u2190',\n\tLeftArrowBar: '\\u21E4',\n\tLeftArrowRightArrow: '\\u21C6',\n\tleftarrowtail: '\\u21A2',\n\tLeftCeiling: '\\u2308',\n\tLeftDoubleBracket: '\\u27E6',\n\tLeftDownTeeVector: '\\u2961',\n\tLeftDownVector: '\\u21C3',\n\tLeftDownVectorBar: '\\u2959',\n\tLeftFloor: '\\u230A',\n\tleftharpoondown: '\\u21BD',\n\tleftharpoonup: '\\u21BC',\n\tleftleftarrows: '\\u21C7',\n\tLeftRightArrow: '\\u2194',\n\tLeftrightarrow: '\\u21D4',\n\tleftrightarrow: '\\u2194',\n\tleftrightarrows: '\\u21C6',\n\tleftrightharpoons: '\\u21CB',\n\tleftrightsquigarrow: '\\u21AD',\n\tLeftRightVector: '\\u294E',\n\tLeftTee: '\\u22A3',\n\tLeftTeeArrow: '\\u21A4',\n\tLeftTeeVector: '\\u295A',\n\tleftthreetimes: '\\u22CB',\n\tLeftTriangle: '\\u22B2',\n\tLeftTriangleBar: '\\u29CF',\n\tLeftTriangleEqual: '\\u22B4',\n\tLeftUpDownVector: '\\u2951',\n\tLeftUpTeeVector: '\\u2960',\n\tLeftUpVector: '\\u21BF',\n\tLeftUpVectorBar: '\\u2958',\n\tLeftVector: '\\u21BC',\n\tLeftVectorBar: '\\u2952',\n\tlEg: '\\u2A8B',\n\tleg: '\\u22DA',\n\tleq: '\\u2264',\n\tleqq: '\\u2266',\n\tleqslant: '\\u2A7D',\n\tles: '\\u2A7D',\n\tlescc: '\\u2AA8',\n\tlesdot: '\\u2A7F',\n\tlesdoto: '\\u2A81',\n\tlesdotor: '\\u2A83',\n\tlesg: '\\u22DA\\uFE00',\n\tlesges: '\\u2A93',\n\tlessapprox: '\\u2A85',\n\tlessdot: '\\u22D6',\n\tlesseqgtr: '\\u22DA',\n\tlesseqqgtr: '\\u2A8B',\n\tLessEqualGreater: '\\u22DA',\n\tLessFullEqual: '\\u2266',\n\tLessGreater: '\\u2276',\n\tlessgtr: '\\u2276',\n\tLessLess: '\\u2AA1',\n\tlesssim: '\\u2272',\n\tLessSlantEqual: '\\u2A7D',\n\tLessTilde: '\\u2272',\n\tlfisht: '\\u297C',\n\tlfloor: '\\u230A',\n\tLfr: '\\uD835\\uDD0F',\n\tlfr: '\\uD835\\uDD29',\n\tlg: '\\u2276',\n\tlgE: '\\u2A91',\n\tlHar: '\\u2962',\n\tlhard: '\\u21BD',\n\tlharu: '\\u21BC',\n\tlharul: '\\u296A',\n\tlhblk: '\\u2584',\n\tLJcy: '\\u0409',\n\tljcy: '\\u0459',\n\tLl: '\\u22D8',\n\tll: '\\u226A',\n\tllarr: '\\u21C7',\n\tllcorner: '\\u231E',\n\tLleftarrow: '\\u21DA',\n\tllhard: '\\u296B',\n\tlltri: '\\u25FA',\n\tLmidot: '\\u013F',\n\tlmidot: '\\u0140',\n\tlmoust: '\\u23B0',\n\tlmoustache: '\\u23B0',\n\tlnap: '\\u2A89',\n\tlnapprox: '\\u2A89',\n\tlnE: '\\u2268',\n\tlne: '\\u2A87',\n\tlneq: '\\u2A87',\n\tlneqq: '\\u2268',\n\tlnsim: '\\u22E6',\n\tloang: '\\u27EC',\n\tloarr: '\\u21FD',\n\tlobrk: '\\u27E6',\n\tLongLeftArrow: '\\u27F5',\n\tLongleftarrow: '\\u27F8',\n\tlongleftarrow: '\\u27F5',\n\tLongLeftRightArrow: '\\u27F7',\n\tLongleftrightarrow: '\\u27FA',\n\tlongleftrightarrow: '\\u27F7',\n\tlongmapsto: '\\u27FC',\n\tLongRightArrow: '\\u27F6',\n\tLongrightarrow: '\\u27F9',\n\tlongrightarrow: '\\u27F6',\n\tlooparrowleft: '\\u21AB',\n\tlooparrowright: '\\u21AC',\n\tlopar: '\\u2985',\n\tLopf: '\\uD835\\uDD43',\n\tlopf: '\\uD835\\uDD5D',\n\tloplus: '\\u2A2D',\n\tlotimes: '\\u2A34',\n\tlowast: '\\u2217',\n\tlowbar: '\\u005F',\n\tLowerLeftArrow: '\\u2199',\n\tLowerRightArrow: '\\u2198',\n\tloz: '\\u25CA',\n\tlozenge: '\\u25CA',\n\tlozf: '\\u29EB',\n\tlpar: '\\u0028',\n\tlparlt: '\\u2993',\n\tlrarr: '\\u21C6',\n\tlrcorner: '\\u231F',\n\tlrhar: '\\u21CB',\n\tlrhard: '\\u296D',\n\tlrm: '\\u200E',\n\tlrtri: '\\u22BF',\n\tlsaquo: '\\u2039',\n\tLscr: '\\u2112',\n\tlscr: '\\uD835\\uDCC1',\n\tLsh: '\\u21B0',\n\tlsh: '\\u21B0',\n\tlsim: '\\u2272',\n\tlsime: '\\u2A8D',\n\tlsimg: '\\u2A8F',\n\tlsqb: '\\u005B',\n\tlsquo: '\\u2018',\n\tlsquor: '\\u201A',\n\tLstrok: '\\u0141',\n\tlstrok: '\\u0142',\n\tLt: '\\u226A',\n\tLT: '\\u003C',\n\tlt: 
'\\u003C',\n\tltcc: '\\u2AA6',\n\tltcir: '\\u2A79',\n\tltdot: '\\u22D6',\n\tlthree: '\\u22CB',\n\tltimes: '\\u22C9',\n\tltlarr: '\\u2976',\n\tltquest: '\\u2A7B',\n\tltri: '\\u25C3',\n\tltrie: '\\u22B4',\n\tltrif: '\\u25C2',\n\tltrPar: '\\u2996',\n\tlurdshar: '\\u294A',\n\tluruhar: '\\u2966',\n\tlvertneqq: '\\u2268\\uFE00',\n\tlvnE: '\\u2268\\uFE00',\n\tmacr: '\\u00AF',\n\tmale: '\\u2642',\n\tmalt: '\\u2720',\n\tmaltese: '\\u2720',\n\tMap: '\\u2905',\n\tmap: '\\u21A6',\n\tmapsto: '\\u21A6',\n\tmapstodown: '\\u21A7',\n\tmapstoleft: '\\u21A4',\n\tmapstoup: '\\u21A5',\n\tmarker: '\\u25AE',\n\tmcomma: '\\u2A29',\n\tMcy: '\\u041C',\n\tmcy: '\\u043C',\n\tmdash: '\\u2014',\n\tmDDot: '\\u223A',\n\tmeasuredangle: '\\u2221',\n\tMediumSpace: '\\u205F',\n\tMellintrf: '\\u2133',\n\tMfr: '\\uD835\\uDD10',\n\tmfr: '\\uD835\\uDD2A',\n\tmho: '\\u2127',\n\tmicro: '\\u00B5',\n\tmid: '\\u2223',\n\tmidast: '\\u002A',\n\tmidcir: '\\u2AF0',\n\tmiddot: '\\u00B7',\n\tminus: '\\u2212',\n\tminusb: '\\u229F',\n\tminusd: '\\u2238',\n\tminusdu: '\\u2A2A',\n\tMinusPlus: '\\u2213',\n\tmlcp: '\\u2ADB',\n\tmldr: '\\u2026',\n\tmnplus: '\\u2213',\n\tmodels: '\\u22A7',\n\tMopf: '\\uD835\\uDD44',\n\tmopf: '\\uD835\\uDD5E',\n\tmp: '\\u2213',\n\tMscr: '\\u2133',\n\tmscr: '\\uD835\\uDCC2',\n\tmstpos: '\\u223E',\n\tMu: '\\u039C',\n\tmu: '\\u03BC',\n\tmultimap: '\\u22B8',\n\tmumap: '\\u22B8',\n\tnabla: '\\u2207',\n\tNacute: '\\u0143',\n\tnacute: '\\u0144',\n\tnang: '\\u2220\\u20D2',\n\tnap: '\\u2249',\n\tnapE: '\\u2A70\\u0338',\n\tnapid: '\\u224B\\u0338',\n\tnapos: '\\u0149',\n\tnapprox: '\\u2249',\n\tnatur: '\\u266E',\n\tnatural: '\\u266E',\n\tnaturals: '\\u2115',\n\tnbsp: '\\u00A0',\n\tnbump: '\\u224E\\u0338',\n\tnbumpe: '\\u224F\\u0338',\n\tncap: '\\u2A43',\n\tNcaron: '\\u0147',\n\tncaron: '\\u0148',\n\tNcedil: '\\u0145',\n\tncedil: '\\u0146',\n\tncong: '\\u2247',\n\tncongdot: '\\u2A6D\\u0338',\n\tncup: '\\u2A42',\n\tNcy: '\\u041D',\n\tncy: '\\u043D',\n\tndash: '\\u2013',\n\tne: '\\u2260',\n\tnearhk: '\\u2924',\n\tneArr: '\\u21D7',\n\tnearr: '\\u2197',\n\tnearrow: '\\u2197',\n\tnedot: '\\u2250\\u0338',\n\tNegativeMediumSpace: '\\u200B',\n\tNegativeThickSpace: '\\u200B',\n\tNegativeThinSpace: '\\u200B',\n\tNegativeVeryThinSpace: '\\u200B',\n\tnequiv: '\\u2262',\n\tnesear: '\\u2928',\n\tnesim: '\\u2242\\u0338',\n\tNestedGreaterGreater: '\\u226B',\n\tNestedLessLess: '\\u226A',\n\tNewLine: '\\u000A',\n\tnexist: '\\u2204',\n\tnexists: '\\u2204',\n\tNfr: '\\uD835\\uDD11',\n\tnfr: '\\uD835\\uDD2B',\n\tngE: '\\u2267\\u0338',\n\tnge: '\\u2271',\n\tngeq: '\\u2271',\n\tngeqq: '\\u2267\\u0338',\n\tngeqslant: '\\u2A7E\\u0338',\n\tnges: '\\u2A7E\\u0338',\n\tnGg: '\\u22D9\\u0338',\n\tngsim: '\\u2275',\n\tnGt: '\\u226B\\u20D2',\n\tngt: '\\u226F',\n\tngtr: '\\u226F',\n\tnGtv: '\\u226B\\u0338',\n\tnhArr: '\\u21CE',\n\tnharr: '\\u21AE',\n\tnhpar: '\\u2AF2',\n\tni: '\\u220B',\n\tnis: '\\u22FC',\n\tnisd: '\\u22FA',\n\tniv: '\\u220B',\n\tNJcy: '\\u040A',\n\tnjcy: '\\u045A',\n\tnlArr: '\\u21CD',\n\tnlarr: '\\u219A',\n\tnldr: '\\u2025',\n\tnlE: '\\u2266\\u0338',\n\tnle: '\\u2270',\n\tnLeftarrow: '\\u21CD',\n\tnleftarrow: '\\u219A',\n\tnLeftrightarrow: '\\u21CE',\n\tnleftrightarrow: '\\u21AE',\n\tnleq: '\\u2270',\n\tnleqq: '\\u2266\\u0338',\n\tnleqslant: '\\u2A7D\\u0338',\n\tnles: '\\u2A7D\\u0338',\n\tnless: '\\u226E',\n\tnLl: '\\u22D8\\u0338',\n\tnlsim: '\\u2274',\n\tnLt: '\\u226A\\u20D2',\n\tnlt: '\\u226E',\n\tnltri: '\\u22EA',\n\tnltrie: '\\u22EC',\n\tnLtv: '\\u226A\\u0338',\n\tnmid: '\\u2224',\n\tNoBreak: '\\u2060',\n\tNonBreakingSpace: 
'\\u00A0',\n\tNopf: '\\u2115',\n\tnopf: '\\uD835\\uDD5F',\n\tNot: '\\u2AEC',\n\tnot: '\\u00AC',\n\tNotCongruent: '\\u2262',\n\tNotCupCap: '\\u226D',\n\tNotDoubleVerticalBar: '\\u2226',\n\tNotElement: '\\u2209',\n\tNotEqual: '\\u2260',\n\tNotEqualTilde: '\\u2242\\u0338',\n\tNotExists: '\\u2204',\n\tNotGreater: '\\u226F',\n\tNotGreaterEqual: '\\u2271',\n\tNotGreaterFullEqual: '\\u2267\\u0338',\n\tNotGreaterGreater: '\\u226B\\u0338',\n\tNotGreaterLess: '\\u2279',\n\tNotGreaterSlantEqual: '\\u2A7E\\u0338',\n\tNotGreaterTilde: '\\u2275',\n\tNotHumpDownHump: '\\u224E\\u0338',\n\tNotHumpEqual: '\\u224F\\u0338',\n\tnotin: '\\u2209',\n\tnotindot: '\\u22F5\\u0338',\n\tnotinE: '\\u22F9\\u0338',\n\tnotinva: '\\u2209',\n\tnotinvb: '\\u22F7',\n\tnotinvc: '\\u22F6',\n\tNotLeftTriangle: '\\u22EA',\n\tNotLeftTriangleBar: '\\u29CF\\u0338',\n\tNotLeftTriangleEqual: '\\u22EC',\n\tNotLess: '\\u226E',\n\tNotLessEqual: '\\u2270',\n\tNotLessGreater: '\\u2278',\n\tNotLessLess: '\\u226A\\u0338',\n\tNotLessSlantEqual: '\\u2A7D\\u0338',\n\tNotLessTilde: '\\u2274',\n\tNotNestedGreaterGreater: '\\u2AA2\\u0338',\n\tNotNestedLessLess: '\\u2AA1\\u0338',\n\tnotni: '\\u220C',\n\tnotniva: '\\u220C',\n\tnotnivb: '\\u22FE',\n\tnotnivc: '\\u22FD',\n\tNotPrecedes: '\\u2280',\n\tNotPrecedesEqual: '\\u2AAF\\u0338',\n\tNotPrecedesSlantEqual: '\\u22E0',\n\tNotReverseElement: '\\u220C',\n\tNotRightTriangle: '\\u22EB',\n\tNotRightTriangleBar: '\\u29D0\\u0338',\n\tNotRightTriangleEqual: '\\u22ED',\n\tNotSquareSubset: '\\u228F\\u0338',\n\tNotSquareSubsetEqual: '\\u22E2',\n\tNotSquareSuperset: '\\u2290\\u0338',\n\tNotSquareSupersetEqual: '\\u22E3',\n\tNotSubset: '\\u2282\\u20D2',\n\tNotSubsetEqual: '\\u2288',\n\tNotSucceeds: '\\u2281',\n\tNotSucceedsEqual: '\\u2AB0\\u0338',\n\tNotSucceedsSlantEqual: '\\u22E1',\n\tNotSucceedsTilde: '\\u227F\\u0338',\n\tNotSuperset: '\\u2283\\u20D2',\n\tNotSupersetEqual: '\\u2289',\n\tNotTilde: '\\u2241',\n\tNotTildeEqual: '\\u2244',\n\tNotTildeFullEqual: '\\u2247',\n\tNotTildeTilde: '\\u2249',\n\tNotVerticalBar: '\\u2224',\n\tnpar: '\\u2226',\n\tnparallel: '\\u2226',\n\tnparsl: '\\u2AFD\\u20E5',\n\tnpart: '\\u2202\\u0338',\n\tnpolint: '\\u2A14',\n\tnpr: '\\u2280',\n\tnprcue: '\\u22E0',\n\tnpre: '\\u2AAF\\u0338',\n\tnprec: '\\u2280',\n\tnpreceq: '\\u2AAF\\u0338',\n\tnrArr: '\\u21CF',\n\tnrarr: '\\u219B',\n\tnrarrc: '\\u2933\\u0338',\n\tnrarrw: '\\u219D\\u0338',\n\tnRightarrow: '\\u21CF',\n\tnrightarrow: '\\u219B',\n\tnrtri: '\\u22EB',\n\tnrtrie: '\\u22ED',\n\tnsc: '\\u2281',\n\tnsccue: '\\u22E1',\n\tnsce: '\\u2AB0\\u0338',\n\tNscr: '\\uD835\\uDCA9',\n\tnscr: '\\uD835\\uDCC3',\n\tnshortmid: '\\u2224',\n\tnshortparallel: '\\u2226',\n\tnsim: '\\u2241',\n\tnsime: '\\u2244',\n\tnsimeq: '\\u2244',\n\tnsmid: '\\u2224',\n\tnspar: '\\u2226',\n\tnsqsube: '\\u22E2',\n\tnsqsupe: '\\u22E3',\n\tnsub: '\\u2284',\n\tnsubE: '\\u2AC5\\u0338',\n\tnsube: '\\u2288',\n\tnsubset: '\\u2282\\u20D2',\n\tnsubseteq: '\\u2288',\n\tnsubseteqq: '\\u2AC5\\u0338',\n\tnsucc: '\\u2281',\n\tnsucceq: '\\u2AB0\\u0338',\n\tnsup: '\\u2285',\n\tnsupE: '\\u2AC6\\u0338',\n\tnsupe: '\\u2289',\n\tnsupset: '\\u2283\\u20D2',\n\tnsupseteq: '\\u2289',\n\tnsupseteqq: '\\u2AC6\\u0338',\n\tntgl: '\\u2279',\n\tNtilde: '\\u00D1',\n\tntilde: '\\u00F1',\n\tntlg: '\\u2278',\n\tntriangleleft: '\\u22EA',\n\tntrianglelefteq: '\\u22EC',\n\tntriangleright: '\\u22EB',\n\tntrianglerighteq: '\\u22ED',\n\tNu: '\\u039D',\n\tnu: '\\u03BD',\n\tnum: '\\u0023',\n\tnumero: '\\u2116',\n\tnumsp: '\\u2007',\n\tnvap: '\\u224D\\u20D2',\n\tnVDash: '\\u22AF',\n\tnVdash: 
'\\u22AE',\n\tnvDash: '\\u22AD',\n\tnvdash: '\\u22AC',\n\tnvge: '\\u2265\\u20D2',\n\tnvgt: '\\u003E\\u20D2',\n\tnvHarr: '\\u2904',\n\tnvinfin: '\\u29DE',\n\tnvlArr: '\\u2902',\n\tnvle: '\\u2264\\u20D2',\n\tnvlt: '\\u003C\\u20D2',\n\tnvltrie: '\\u22B4\\u20D2',\n\tnvrArr: '\\u2903',\n\tnvrtrie: '\\u22B5\\u20D2',\n\tnvsim: '\\u223C\\u20D2',\n\tnwarhk: '\\u2923',\n\tnwArr: '\\u21D6',\n\tnwarr: '\\u2196',\n\tnwarrow: '\\u2196',\n\tnwnear: '\\u2927',\n\tOacute: '\\u00D3',\n\toacute: '\\u00F3',\n\toast: '\\u229B',\n\tocir: '\\u229A',\n\tOcirc: '\\u00D4',\n\tocirc: '\\u00F4',\n\tOcy: '\\u041E',\n\tocy: '\\u043E',\n\todash: '\\u229D',\n\tOdblac: '\\u0150',\n\todblac: '\\u0151',\n\todiv: '\\u2A38',\n\todot: '\\u2299',\n\todsold: '\\u29BC',\n\tOElig: '\\u0152',\n\toelig: '\\u0153',\n\tofcir: '\\u29BF',\n\tOfr: '\\uD835\\uDD12',\n\tofr: '\\uD835\\uDD2C',\n\togon: '\\u02DB',\n\tOgrave: '\\u00D2',\n\tograve: '\\u00F2',\n\togt: '\\u29C1',\n\tohbar: '\\u29B5',\n\tohm: '\\u03A9',\n\toint: '\\u222E',\n\tolarr: '\\u21BA',\n\tolcir: '\\u29BE',\n\tolcross: '\\u29BB',\n\toline: '\\u203E',\n\tolt: '\\u29C0',\n\tOmacr: '\\u014C',\n\tomacr: '\\u014D',\n\tOmega: '\\u03A9',\n\tomega: '\\u03C9',\n\tOmicron: '\\u039F',\n\tomicron: '\\u03BF',\n\tomid: '\\u29B6',\n\tominus: '\\u2296',\n\tOopf: '\\uD835\\uDD46',\n\toopf: '\\uD835\\uDD60',\n\topar: '\\u29B7',\n\tOpenCurlyDoubleQuote: '\\u201C',\n\tOpenCurlyQuote: '\\u2018',\n\toperp: '\\u29B9',\n\toplus: '\\u2295',\n\tOr: '\\u2A54',\n\tor: '\\u2228',\n\torarr: '\\u21BB',\n\tord: '\\u2A5D',\n\torder: '\\u2134',\n\torderof: '\\u2134',\n\tordf: '\\u00AA',\n\tordm: '\\u00BA',\n\torigof: '\\u22B6',\n\toror: '\\u2A56',\n\torslope: '\\u2A57',\n\torv: '\\u2A5B',\n\toS: '\\u24C8',\n\tOscr: '\\uD835\\uDCAA',\n\toscr: '\\u2134',\n\tOslash: '\\u00D8',\n\toslash: '\\u00F8',\n\tosol: '\\u2298',\n\tOtilde: '\\u00D5',\n\totilde: '\\u00F5',\n\tOtimes: '\\u2A37',\n\totimes: '\\u2297',\n\totimesas: '\\u2A36',\n\tOuml: '\\u00D6',\n\touml: '\\u00F6',\n\tovbar: '\\u233D',\n\tOverBar: '\\u203E',\n\tOverBrace: '\\u23DE',\n\tOverBracket: '\\u23B4',\n\tOverParenthesis: '\\u23DC',\n\tpar: '\\u2225',\n\tpara: '\\u00B6',\n\tparallel: '\\u2225',\n\tparsim: '\\u2AF3',\n\tparsl: '\\u2AFD',\n\tpart: '\\u2202',\n\tPartialD: '\\u2202',\n\tPcy: '\\u041F',\n\tpcy: '\\u043F',\n\tpercnt: '\\u0025',\n\tperiod: '\\u002E',\n\tpermil: '\\u2030',\n\tperp: '\\u22A5',\n\tpertenk: '\\u2031',\n\tPfr: '\\uD835\\uDD13',\n\tpfr: '\\uD835\\uDD2D',\n\tPhi: '\\u03A6',\n\tphi: '\\u03C6',\n\tphiv: '\\u03D5',\n\tphmmat: '\\u2133',\n\tphone: '\\u260E',\n\tPi: '\\u03A0',\n\tpi: '\\u03C0',\n\tpitchfork: '\\u22D4',\n\tpiv: '\\u03D6',\n\tplanck: '\\u210F',\n\tplanckh: '\\u210E',\n\tplankv: '\\u210F',\n\tplus: '\\u002B',\n\tplusacir: '\\u2A23',\n\tplusb: '\\u229E',\n\tpluscir: '\\u2A22',\n\tplusdo: '\\u2214',\n\tplusdu: '\\u2A25',\n\tpluse: '\\u2A72',\n\tPlusMinus: '\\u00B1',\n\tplusmn: '\\u00B1',\n\tplussim: '\\u2A26',\n\tplustwo: '\\u2A27',\n\tpm: '\\u00B1',\n\tPoincareplane: '\\u210C',\n\tpointint: '\\u2A15',\n\tPopf: '\\u2119',\n\tpopf: '\\uD835\\uDD61',\n\tpound: '\\u00A3',\n\tPr: '\\u2ABB',\n\tpr: '\\u227A',\n\tprap: '\\u2AB7',\n\tprcue: '\\u227C',\n\tprE: '\\u2AB3',\n\tpre: '\\u2AAF',\n\tprec: '\\u227A',\n\tprecapprox: '\\u2AB7',\n\tpreccurlyeq: '\\u227C',\n\tPrecedes: '\\u227A',\n\tPrecedesEqual: '\\u2AAF',\n\tPrecedesSlantEqual: '\\u227C',\n\tPrecedesTilde: '\\u227E',\n\tpreceq: '\\u2AAF',\n\tprecnapprox: '\\u2AB9',\n\tprecneqq: '\\u2AB5',\n\tprecnsim: '\\u22E8',\n\tprecsim: '\\u227E',\n\tPrime: '\\u2033',\n\tprime: 
'\\u2032',\n\tprimes: '\\u2119',\n\tprnap: '\\u2AB9',\n\tprnE: '\\u2AB5',\n\tprnsim: '\\u22E8',\n\tprod: '\\u220F',\n\tProduct: '\\u220F',\n\tprofalar: '\\u232E',\n\tprofline: '\\u2312',\n\tprofsurf: '\\u2313',\n\tprop: '\\u221D',\n\tProportion: '\\u2237',\n\tProportional: '\\u221D',\n\tpropto: '\\u221D',\n\tprsim: '\\u227E',\n\tprurel: '\\u22B0',\n\tPscr: '\\uD835\\uDCAB',\n\tpscr: '\\uD835\\uDCC5',\n\tPsi: '\\u03A8',\n\tpsi: '\\u03C8',\n\tpuncsp: '\\u2008',\n\tQfr: '\\uD835\\uDD14',\n\tqfr: '\\uD835\\uDD2E',\n\tqint: '\\u2A0C',\n\tQopf: '\\u211A',\n\tqopf: '\\uD835\\uDD62',\n\tqprime: '\\u2057',\n\tQscr: '\\uD835\\uDCAC',\n\tqscr: '\\uD835\\uDCC6',\n\tquaternions: '\\u210D',\n\tquatint: '\\u2A16',\n\tquest: '\\u003F',\n\tquesteq: '\\u225F',\n\tQUOT: '\\u0022',\n\tquot: '\\u0022',\n\trAarr: '\\u21DB',\n\trace: '\\u223D\\u0331',\n\tRacute: '\\u0154',\n\tracute: '\\u0155',\n\tradic: '\\u221A',\n\traemptyv: '\\u29B3',\n\tRang: '\\u27EB',\n\trang: '\\u27E9',\n\trangd: '\\u2992',\n\trange: '\\u29A5',\n\trangle: '\\u27E9',\n\traquo: '\\u00BB',\n\tRarr: '\\u21A0',\n\trArr: '\\u21D2',\n\trarr: '\\u2192',\n\trarrap: '\\u2975',\n\trarrb: '\\u21E5',\n\trarrbfs: '\\u2920',\n\trarrc: '\\u2933',\n\trarrfs: '\\u291E',\n\trarrhk: '\\u21AA',\n\trarrlp: '\\u21AC',\n\trarrpl: '\\u2945',\n\trarrsim: '\\u2974',\n\tRarrtl: '\\u2916',\n\trarrtl: '\\u21A3',\n\trarrw: '\\u219D',\n\trAtail: '\\u291C',\n\tratail: '\\u291A',\n\tratio: '\\u2236',\n\trationals: '\\u211A',\n\tRBarr: '\\u2910',\n\trBarr: '\\u290F',\n\trbarr: '\\u290D',\n\trbbrk: '\\u2773',\n\trbrace: '\\u007D',\n\trbrack: '\\u005D',\n\trbrke: '\\u298C',\n\trbrksld: '\\u298E',\n\trbrkslu: '\\u2990',\n\tRcaron: '\\u0158',\n\trcaron: '\\u0159',\n\tRcedil: '\\u0156',\n\trcedil: '\\u0157',\n\trceil: '\\u2309',\n\trcub: '\\u007D',\n\tRcy: '\\u0420',\n\trcy: '\\u0440',\n\trdca: '\\u2937',\n\trdldhar: '\\u2969',\n\trdquo: '\\u201D',\n\trdquor: '\\u201D',\n\trdsh: '\\u21B3',\n\tRe: '\\u211C',\n\treal: '\\u211C',\n\trealine: '\\u211B',\n\trealpart: '\\u211C',\n\treals: '\\u211D',\n\trect: '\\u25AD',\n\tREG: '\\u00AE',\n\treg: '\\u00AE',\n\tReverseElement: '\\u220B',\n\tReverseEquilibrium: '\\u21CB',\n\tReverseUpEquilibrium: '\\u296F',\n\trfisht: '\\u297D',\n\trfloor: '\\u230B',\n\tRfr: '\\u211C',\n\trfr: '\\uD835\\uDD2F',\n\trHar: '\\u2964',\n\trhard: '\\u21C1',\n\trharu: '\\u21C0',\n\trharul: '\\u296C',\n\tRho: '\\u03A1',\n\trho: '\\u03C1',\n\trhov: '\\u03F1',\n\tRightAngleBracket: '\\u27E9',\n\tRightArrow: '\\u2192',\n\tRightarrow: '\\u21D2',\n\trightarrow: '\\u2192',\n\tRightArrowBar: '\\u21E5',\n\tRightArrowLeftArrow: '\\u21C4',\n\trightarrowtail: '\\u21A3',\n\tRightCeiling: '\\u2309',\n\tRightDoubleBracket: '\\u27E7',\n\tRightDownTeeVector: '\\u295D',\n\tRightDownVector: '\\u21C2',\n\tRightDownVectorBar: '\\u2955',\n\tRightFloor: '\\u230B',\n\trightharpoondown: '\\u21C1',\n\trightharpoonup: '\\u21C0',\n\trightleftarrows: '\\u21C4',\n\trightleftharpoons: '\\u21CC',\n\trightrightarrows: '\\u21C9',\n\trightsquigarrow: '\\u219D',\n\tRightTee: '\\u22A2',\n\tRightTeeArrow: '\\u21A6',\n\tRightTeeVector: '\\u295B',\n\trightthreetimes: '\\u22CC',\n\tRightTriangle: '\\u22B3',\n\tRightTriangleBar: '\\u29D0',\n\tRightTriangleEqual: '\\u22B5',\n\tRightUpDownVector: '\\u294F',\n\tRightUpTeeVector: '\\u295C',\n\tRightUpVector: '\\u21BE',\n\tRightUpVectorBar: '\\u2954',\n\tRightVector: '\\u21C0',\n\tRightVectorBar: '\\u2953',\n\tring: '\\u02DA',\n\trisingdotseq: '\\u2253',\n\trlarr: '\\u21C4',\n\trlhar: '\\u21CC',\n\trlm: '\\u200F',\n\trmoust: '\\u23B1',\n\trmoustache: 
'\\u23B1',\n\trnmid: '\\u2AEE',\n\troang: '\\u27ED',\n\troarr: '\\u21FE',\n\trobrk: '\\u27E7',\n\tropar: '\\u2986',\n\tRopf: '\\u211D',\n\tropf: '\\uD835\\uDD63',\n\troplus: '\\u2A2E',\n\trotimes: '\\u2A35',\n\tRoundImplies: '\\u2970',\n\trpar: '\\u0029',\n\trpargt: '\\u2994',\n\trppolint: '\\u2A12',\n\trrarr: '\\u21C9',\n\tRrightarrow: '\\u21DB',\n\trsaquo: '\\u203A',\n\tRscr: '\\u211B',\n\trscr: '\\uD835\\uDCC7',\n\tRsh: '\\u21B1',\n\trsh: '\\u21B1',\n\trsqb: '\\u005D',\n\trsquo: '\\u2019',\n\trsquor: '\\u2019',\n\trthree: '\\u22CC',\n\trtimes: '\\u22CA',\n\trtri: '\\u25B9',\n\trtrie: '\\u22B5',\n\trtrif: '\\u25B8',\n\trtriltri: '\\u29CE',\n\tRuleDelayed: '\\u29F4',\n\truluhar: '\\u2968',\n\trx: '\\u211E',\n\tSacute: '\\u015A',\n\tsacute: '\\u015B',\n\tsbquo: '\\u201A',\n\tSc: '\\u2ABC',\n\tsc: '\\u227B',\n\tscap: '\\u2AB8',\n\tScaron: '\\u0160',\n\tscaron: '\\u0161',\n\tsccue: '\\u227D',\n\tscE: '\\u2AB4',\n\tsce: '\\u2AB0',\n\tScedil: '\\u015E',\n\tscedil: '\\u015F',\n\tScirc: '\\u015C',\n\tscirc: '\\u015D',\n\tscnap: '\\u2ABA',\n\tscnE: '\\u2AB6',\n\tscnsim: '\\u22E9',\n\tscpolint: '\\u2A13',\n\tscsim: '\\u227F',\n\tScy: '\\u0421',\n\tscy: '\\u0441',\n\tsdot: '\\u22C5',\n\tsdotb: '\\u22A1',\n\tsdote: '\\u2A66',\n\tsearhk: '\\u2925',\n\tseArr: '\\u21D8',\n\tsearr: '\\u2198',\n\tsearrow: '\\u2198',\n\tsect: '\\u00A7',\n\tsemi: '\\u003B',\n\tseswar: '\\u2929',\n\tsetminus: '\\u2216',\n\tsetmn: '\\u2216',\n\tsext: '\\u2736',\n\tSfr: '\\uD835\\uDD16',\n\tsfr: '\\uD835\\uDD30',\n\tsfrown: '\\u2322',\n\tsharp: '\\u266F',\n\tSHCHcy: '\\u0429',\n\tshchcy: '\\u0449',\n\tSHcy: '\\u0428',\n\tshcy: '\\u0448',\n\tShortDownArrow: '\\u2193',\n\tShortLeftArrow: '\\u2190',\n\tshortmid: '\\u2223',\n\tshortparallel: '\\u2225',\n\tShortRightArrow: '\\u2192',\n\tShortUpArrow: '\\u2191',\n\tshy: '\\u00AD',\n\tSigma: '\\u03A3',\n\tsigma: '\\u03C3',\n\tsigmaf: '\\u03C2',\n\tsigmav: '\\u03C2',\n\tsim: '\\u223C',\n\tsimdot: '\\u2A6A',\n\tsime: '\\u2243',\n\tsimeq: '\\u2243',\n\tsimg: '\\u2A9E',\n\tsimgE: '\\u2AA0',\n\tsiml: '\\u2A9D',\n\tsimlE: '\\u2A9F',\n\tsimne: '\\u2246',\n\tsimplus: '\\u2A24',\n\tsimrarr: '\\u2972',\n\tslarr: '\\u2190',\n\tSmallCircle: '\\u2218',\n\tsmallsetminus: '\\u2216',\n\tsmashp: '\\u2A33',\n\tsmeparsl: '\\u29E4',\n\tsmid: '\\u2223',\n\tsmile: '\\u2323',\n\tsmt: '\\u2AAA',\n\tsmte: '\\u2AAC',\n\tsmtes: '\\u2AAC\\uFE00',\n\tSOFTcy: '\\u042C',\n\tsoftcy: '\\u044C',\n\tsol: '\\u002F',\n\tsolb: '\\u29C4',\n\tsolbar: '\\u233F',\n\tSopf: '\\uD835\\uDD4A',\n\tsopf: '\\uD835\\uDD64',\n\tspades: '\\u2660',\n\tspadesuit: '\\u2660',\n\tspar: '\\u2225',\n\tsqcap: '\\u2293',\n\tsqcaps: '\\u2293\\uFE00',\n\tsqcup: '\\u2294',\n\tsqcups: '\\u2294\\uFE00',\n\tSqrt: '\\u221A',\n\tsqsub: '\\u228F',\n\tsqsube: '\\u2291',\n\tsqsubset: '\\u228F',\n\tsqsubseteq: '\\u2291',\n\tsqsup: '\\u2290',\n\tsqsupe: '\\u2292',\n\tsqsupset: '\\u2290',\n\tsqsupseteq: '\\u2292',\n\tsqu: '\\u25A1',\n\tSquare: '\\u25A1',\n\tsquare: '\\u25A1',\n\tSquareIntersection: '\\u2293',\n\tSquareSubset: '\\u228F',\n\tSquareSubsetEqual: '\\u2291',\n\tSquareSuperset: '\\u2290',\n\tSquareSupersetEqual: '\\u2292',\n\tSquareUnion: '\\u2294',\n\tsquarf: '\\u25AA',\n\tsquf: '\\u25AA',\n\tsrarr: '\\u2192',\n\tSscr: '\\uD835\\uDCAE',\n\tsscr: '\\uD835\\uDCC8',\n\tssetmn: '\\u2216',\n\tssmile: '\\u2323',\n\tsstarf: '\\u22C6',\n\tStar: '\\u22C6',\n\tstar: '\\u2606',\n\tstarf: '\\u2605',\n\tstraightepsilon: '\\u03F5',\n\tstraightphi: '\\u03D5',\n\tstrns: '\\u00AF',\n\tSub: '\\u22D0',\n\tsub: '\\u2282',\n\tsubdot: '\\u2ABD',\n\tsubE: 
'\\u2AC5',\n\tsube: '\\u2286',\n\tsubedot: '\\u2AC3',\n\tsubmult: '\\u2AC1',\n\tsubnE: '\\u2ACB',\n\tsubne: '\\u228A',\n\tsubplus: '\\u2ABF',\n\tsubrarr: '\\u2979',\n\tSubset: '\\u22D0',\n\tsubset: '\\u2282',\n\tsubseteq: '\\u2286',\n\tsubseteqq: '\\u2AC5',\n\tSubsetEqual: '\\u2286',\n\tsubsetneq: '\\u228A',\n\tsubsetneqq: '\\u2ACB',\n\tsubsim: '\\u2AC7',\n\tsubsub: '\\u2AD5',\n\tsubsup: '\\u2AD3',\n\tsucc: '\\u227B',\n\tsuccapprox: '\\u2AB8',\n\tsucccurlyeq: '\\u227D',\n\tSucceeds: '\\u227B',\n\tSucceedsEqual: '\\u2AB0',\n\tSucceedsSlantEqual: '\\u227D',\n\tSucceedsTilde: '\\u227F',\n\tsucceq: '\\u2AB0',\n\tsuccnapprox: '\\u2ABA',\n\tsuccneqq: '\\u2AB6',\n\tsuccnsim: '\\u22E9',\n\tsuccsim: '\\u227F',\n\tSuchThat: '\\u220B',\n\tSum: '\\u2211',\n\tsum: '\\u2211',\n\tsung: '\\u266A',\n\tSup: '\\u22D1',\n\tsup: '\\u2283',\n\tsup1: '\\u00B9',\n\tsup2: '\\u00B2',\n\tsup3: '\\u00B3',\n\tsupdot: '\\u2ABE',\n\tsupdsub: '\\u2AD8',\n\tsupE: '\\u2AC6',\n\tsupe: '\\u2287',\n\tsupedot: '\\u2AC4',\n\tSuperset: '\\u2283',\n\tSupersetEqual: '\\u2287',\n\tsuphsol: '\\u27C9',\n\tsuphsub: '\\u2AD7',\n\tsuplarr: '\\u297B',\n\tsupmult: '\\u2AC2',\n\tsupnE: '\\u2ACC',\n\tsupne: '\\u228B',\n\tsupplus: '\\u2AC0',\n\tSupset: '\\u22D1',\n\tsupset: '\\u2283',\n\tsupseteq: '\\u2287',\n\tsupseteqq: '\\u2AC6',\n\tsupsetneq: '\\u228B',\n\tsupsetneqq: '\\u2ACC',\n\tsupsim: '\\u2AC8',\n\tsupsub: '\\u2AD4',\n\tsupsup: '\\u2AD6',\n\tswarhk: '\\u2926',\n\tswArr: '\\u21D9',\n\tswarr: '\\u2199',\n\tswarrow: '\\u2199',\n\tswnwar: '\\u292A',\n\tszlig: '\\u00DF',\n\tTab: '\\u0009',\n\ttarget: '\\u2316',\n\tTau: '\\u03A4',\n\ttau: '\\u03C4',\n\ttbrk: '\\u23B4',\n\tTcaron: '\\u0164',\n\ttcaron: '\\u0165',\n\tTcedil: '\\u0162',\n\ttcedil: '\\u0163',\n\tTcy: '\\u0422',\n\ttcy: '\\u0442',\n\ttdot: '\\u20DB',\n\ttelrec: '\\u2315',\n\tTfr: '\\uD835\\uDD17',\n\ttfr: '\\uD835\\uDD31',\n\tthere4: '\\u2234',\n\tTherefore: '\\u2234',\n\ttherefore: '\\u2234',\n\tTheta: '\\u0398',\n\ttheta: '\\u03B8',\n\tthetasym: '\\u03D1',\n\tthetav: '\\u03D1',\n\tthickapprox: '\\u2248',\n\tthicksim: '\\u223C',\n\tThickSpace: '\\u205F\\u200A',\n\tthinsp: '\\u2009',\n\tThinSpace: '\\u2009',\n\tthkap: '\\u2248',\n\tthksim: '\\u223C',\n\tTHORN: '\\u00DE',\n\tthorn: '\\u00FE',\n\tTilde: '\\u223C',\n\ttilde: '\\u02DC',\n\tTildeEqual: '\\u2243',\n\tTildeFullEqual: '\\u2245',\n\tTildeTilde: '\\u2248',\n\ttimes: '\\u00D7',\n\ttimesb: '\\u22A0',\n\ttimesbar: '\\u2A31',\n\ttimesd: '\\u2A30',\n\ttint: '\\u222D',\n\ttoea: '\\u2928',\n\ttop: '\\u22A4',\n\ttopbot: '\\u2336',\n\ttopcir: '\\u2AF1',\n\tTopf: '\\uD835\\uDD4B',\n\ttopf: '\\uD835\\uDD65',\n\ttopfork: '\\u2ADA',\n\ttosa: '\\u2929',\n\ttprime: '\\u2034',\n\tTRADE: '\\u2122',\n\ttrade: '\\u2122',\n\ttriangle: '\\u25B5',\n\ttriangledown: '\\u25BF',\n\ttriangleleft: '\\u25C3',\n\ttrianglelefteq: '\\u22B4',\n\ttriangleq: '\\u225C',\n\ttriangleright: '\\u25B9',\n\ttrianglerighteq: '\\u22B5',\n\ttridot: '\\u25EC',\n\ttrie: '\\u225C',\n\ttriminus: '\\u2A3A',\n\tTripleDot: '\\u20DB',\n\ttriplus: '\\u2A39',\n\ttrisb: '\\u29CD',\n\ttritime: '\\u2A3B',\n\ttrpezium: '\\u23E2',\n\tTscr: '\\uD835\\uDCAF',\n\ttscr: '\\uD835\\uDCC9',\n\tTScy: '\\u0426',\n\ttscy: '\\u0446',\n\tTSHcy: '\\u040B',\n\ttshcy: '\\u045B',\n\tTstrok: '\\u0166',\n\ttstrok: '\\u0167',\n\ttwixt: '\\u226C',\n\ttwoheadleftarrow: '\\u219E',\n\ttwoheadrightarrow: '\\u21A0',\n\tUacute: '\\u00DA',\n\tuacute: '\\u00FA',\n\tUarr: '\\u219F',\n\tuArr: '\\u21D1',\n\tuarr: '\\u2191',\n\tUarrocir: '\\u2949',\n\tUbrcy: '\\u040E',\n\tubrcy: '\\u045E',\n\tUbreve: 
'\\u016C',\n\tubreve: '\\u016D',\n\tUcirc: '\\u00DB',\n\tucirc: '\\u00FB',\n\tUcy: '\\u0423',\n\tucy: '\\u0443',\n\tudarr: '\\u21C5',\n\tUdblac: '\\u0170',\n\tudblac: '\\u0171',\n\tudhar: '\\u296E',\n\tufisht: '\\u297E',\n\tUfr: '\\uD835\\uDD18',\n\tufr: '\\uD835\\uDD32',\n\tUgrave: '\\u00D9',\n\tugrave: '\\u00F9',\n\tuHar: '\\u2963',\n\tuharl: '\\u21BF',\n\tuharr: '\\u21BE',\n\tuhblk: '\\u2580',\n\tulcorn: '\\u231C',\n\tulcorner: '\\u231C',\n\tulcrop: '\\u230F',\n\tultri: '\\u25F8',\n\tUmacr: '\\u016A',\n\tumacr: '\\u016B',\n\tuml: '\\u00A8',\n\tUnderBar: '\\u005F',\n\tUnderBrace: '\\u23DF',\n\tUnderBracket: '\\u23B5',\n\tUnderParenthesis: '\\u23DD',\n\tUnion: '\\u22C3',\n\tUnionPlus: '\\u228E',\n\tUogon: '\\u0172',\n\tuogon: '\\u0173',\n\tUopf: '\\uD835\\uDD4C',\n\tuopf: '\\uD835\\uDD66',\n\tUpArrow: '\\u2191',\n\tUparrow: '\\u21D1',\n\tuparrow: '\\u2191',\n\tUpArrowBar: '\\u2912',\n\tUpArrowDownArrow: '\\u21C5',\n\tUpDownArrow: '\\u2195',\n\tUpdownarrow: '\\u21D5',\n\tupdownarrow: '\\u2195',\n\tUpEquilibrium: '\\u296E',\n\tupharpoonleft: '\\u21BF',\n\tupharpoonright: '\\u21BE',\n\tuplus: '\\u228E',\n\tUpperLeftArrow: '\\u2196',\n\tUpperRightArrow: '\\u2197',\n\tUpsi: '\\u03D2',\n\tupsi: '\\u03C5',\n\tupsih: '\\u03D2',\n\tUpsilon: '\\u03A5',\n\tupsilon: '\\u03C5',\n\tUpTee: '\\u22A5',\n\tUpTeeArrow: '\\u21A5',\n\tupuparrows: '\\u21C8',\n\turcorn: '\\u231D',\n\turcorner: '\\u231D',\n\turcrop: '\\u230E',\n\tUring: '\\u016E',\n\turing: '\\u016F',\n\turtri: '\\u25F9',\n\tUscr: '\\uD835\\uDCB0',\n\tuscr: '\\uD835\\uDCCA',\n\tutdot: '\\u22F0',\n\tUtilde: '\\u0168',\n\tutilde: '\\u0169',\n\tutri: '\\u25B5',\n\tutrif: '\\u25B4',\n\tuuarr: '\\u21C8',\n\tUuml: '\\u00DC',\n\tuuml: '\\u00FC',\n\tuwangle: '\\u29A7',\n\tvangrt: '\\u299C',\n\tvarepsilon: '\\u03F5',\n\tvarkappa: '\\u03F0',\n\tvarnothing: '\\u2205',\n\tvarphi: '\\u03D5',\n\tvarpi: '\\u03D6',\n\tvarpropto: '\\u221D',\n\tvArr: '\\u21D5',\n\tvarr: '\\u2195',\n\tvarrho: '\\u03F1',\n\tvarsigma: '\\u03C2',\n\tvarsubsetneq: '\\u228A\\uFE00',\n\tvarsubsetneqq: '\\u2ACB\\uFE00',\n\tvarsupsetneq: '\\u228B\\uFE00',\n\tvarsupsetneqq: '\\u2ACC\\uFE00',\n\tvartheta: '\\u03D1',\n\tvartriangleleft: '\\u22B2',\n\tvartriangleright: '\\u22B3',\n\tVbar: '\\u2AEB',\n\tvBar: '\\u2AE8',\n\tvBarv: '\\u2AE9',\n\tVcy: '\\u0412',\n\tvcy: '\\u0432',\n\tVDash: '\\u22AB',\n\tVdash: '\\u22A9',\n\tvDash: '\\u22A8',\n\tvdash: '\\u22A2',\n\tVdashl: '\\u2AE6',\n\tVee: '\\u22C1',\n\tvee: '\\u2228',\n\tveebar: '\\u22BB',\n\tveeeq: '\\u225A',\n\tvellip: '\\u22EE',\n\tVerbar: '\\u2016',\n\tverbar: '\\u007C',\n\tVert: '\\u2016',\n\tvert: '\\u007C',\n\tVerticalBar: '\\u2223',\n\tVerticalLine: '\\u007C',\n\tVerticalSeparator: '\\u2758',\n\tVerticalTilde: '\\u2240',\n\tVeryThinSpace: '\\u200A',\n\tVfr: '\\uD835\\uDD19',\n\tvfr: '\\uD835\\uDD33',\n\tvltri: '\\u22B2',\n\tvnsub: '\\u2282\\u20D2',\n\tvnsup: '\\u2283\\u20D2',\n\tVopf: '\\uD835\\uDD4D',\n\tvopf: '\\uD835\\uDD67',\n\tvprop: '\\u221D',\n\tvrtri: '\\u22B3',\n\tVscr: '\\uD835\\uDCB1',\n\tvscr: '\\uD835\\uDCCB',\n\tvsubnE: '\\u2ACB\\uFE00',\n\tvsubne: '\\u228A\\uFE00',\n\tvsupnE: '\\u2ACC\\uFE00',\n\tvsupne: '\\u228B\\uFE00',\n\tVvdash: '\\u22AA',\n\tvzigzag: '\\u299A',\n\tWcirc: '\\u0174',\n\twcirc: '\\u0175',\n\twedbar: '\\u2A5F',\n\tWedge: '\\u22C0',\n\twedge: '\\u2227',\n\twedgeq: '\\u2259',\n\tweierp: '\\u2118',\n\tWfr: '\\uD835\\uDD1A',\n\twfr: '\\uD835\\uDD34',\n\tWopf: '\\uD835\\uDD4E',\n\twopf: '\\uD835\\uDD68',\n\twp: '\\u2118',\n\twr: '\\u2240',\n\twreath: '\\u2240',\n\tWscr: '\\uD835\\uDCB2',\n\twscr: 
'\\uD835\\uDCCC',\n\txcap: '\\u22C2',\n\txcirc: '\\u25EF',\n\txcup: '\\u22C3',\n\txdtri: '\\u25BD',\n\tXfr: '\\uD835\\uDD1B',\n\txfr: '\\uD835\\uDD35',\n\txhArr: '\\u27FA',\n\txharr: '\\u27F7',\n\tXi: '\\u039E',\n\txi: '\\u03BE',\n\txlArr: '\\u27F8',\n\txlarr: '\\u27F5',\n\txmap: '\\u27FC',\n\txnis: '\\u22FB',\n\txodot: '\\u2A00',\n\tXopf: '\\uD835\\uDD4F',\n\txopf: '\\uD835\\uDD69',\n\txoplus: '\\u2A01',\n\txotime: '\\u2A02',\n\txrArr: '\\u27F9',\n\txrarr: '\\u27F6',\n\tXscr: '\\uD835\\uDCB3',\n\txscr: '\\uD835\\uDCCD',\n\txsqcup: '\\u2A06',\n\txuplus: '\\u2A04',\n\txutri: '\\u25B3',\n\txvee: '\\u22C1',\n\txwedge: '\\u22C0',\n\tYacute: '\\u00DD',\n\tyacute: '\\u00FD',\n\tYAcy: '\\u042F',\n\tyacy: '\\u044F',\n\tYcirc: '\\u0176',\n\tycirc: '\\u0177',\n\tYcy: '\\u042B',\n\tycy: '\\u044B',\n\tyen: '\\u00A5',\n\tYfr: '\\uD835\\uDD1C',\n\tyfr: '\\uD835\\uDD36',\n\tYIcy: '\\u0407',\n\tyicy: '\\u0457',\n\tYopf: '\\uD835\\uDD50',\n\tyopf: '\\uD835\\uDD6A',\n\tYscr: '\\uD835\\uDCB4',\n\tyscr: '\\uD835\\uDCCE',\n\tYUcy: '\\u042E',\n\tyucy: '\\u044E',\n\tYuml: '\\u0178',\n\tyuml: '\\u00FF',\n\tZacute: '\\u0179',\n\tzacute: '\\u017A',\n\tZcaron: '\\u017D',\n\tzcaron: '\\u017E',\n\tZcy: '\\u0417',\n\tzcy: '\\u0437',\n\tZdot: '\\u017B',\n\tzdot: '\\u017C',\n\tzeetrf: '\\u2128',\n\tZeroWidthSpace: '\\u200B',\n\tZeta: '\\u0396',\n\tzeta: '\\u03B6',\n\tZfr: '\\u2128',\n\tzfr: '\\uD835\\uDD37',\n\tZHcy: '\\u0416',\n\tzhcy: '\\u0436',\n\tzigrarr: '\\u21DD',\n\tZopf: '\\u2124',\n\tzopf: '\\uD835\\uDD6B',\n\tZscr: '\\uD835\\uDCB5',\n\tzscr: '\\uD835\\uDCCF',\n\tzwj: '\\u200D',\n\tzwnj: '\\u200C',\n});\n\n/**\n * @deprecated use `HTML_ENTITIES` instead\n * @see HTML_ENTITIES\n */\nexports.entityMap = exports.HTML_ENTITIES;\n", "var NAMESPACE = require(\"./conventions\").NAMESPACE;\n\n//[4] \tNameStartChar\t ::= \t\":\" | [A-Z] | \"_\" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] | [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] | [#x10000-#xEFFFF]\n//[4a] \tNameChar\t ::= \tNameStartChar | \"-\" | \".\" | [0-9] | #xB7 | [#x0300-#x036F] | [#x203F-#x2040]\n//[5] \tName\t ::= \tNameStartChar (NameChar)*\nvar nameStartChar = /[A-Z_a-z\\xC0-\\xD6\\xD8-\\xF6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD]///\\u10000-\\uEFFFF\nvar nameChar = new RegExp(\"[\\\\-\\\\.0-9\"+nameStartChar.source.slice(1,-1)+\"\\\\u00B7\\\\u0300-\\\\u036F\\\\u203F-\\\\u2040]\");\nvar tagNamePattern = new RegExp('^'+nameStartChar.source+nameChar.source+'*(?:\\:'+nameStartChar.source+nameChar.source+'*)?$');\n//var tagNamePattern = /^[a-zA-Z_][\\w\\-\\.]*(?:\\:[a-zA-Z_][\\w\\-\\.]*)?$/\n//var handlers = 'resolveEntity,getExternalSubset,characters,endDocument,endElement,endPrefixMapping,ignorableWhitespace,processingInstruction,setDocumentLocator,skippedEntity,startDocument,startElement,startPrefixMapping,notationDecl,unparsedEntityDecl,error,fatalError,warning,attributeDecl,elementDecl,externalEntityDecl,internalEntityDecl,comment,endCDATA,endDTD,endEntity,startCDATA,startDTD,startEntity'.split(',')\n\n//S_TAG,\tS_ATTR,\tS_EQ,\tS_ATTR_NOQUOT_VALUE\n//S_ATTR_SPACE,\tS_ATTR_END,\tS_TAG_SPACE, S_TAG_CLOSE\nvar S_TAG = 0;//tag name offerring\nvar S_ATTR = 1;//attr name offerring\nvar S_ATTR_SPACE=2;//attr name end and space offer\nvar S_EQ = 3;//=space?\nvar S_ATTR_NOQUOT_VALUE = 4;//attr value(no quot value only)\nvar S_ATTR_END = 5;//attr value end and no 
space(quot end)\nvar S_TAG_SPACE = 6;//(attr value end || tag end ) && (space offer)\nvar S_TAG_CLOSE = 7;//closed el\n\n/**\n * Creates an error that will not be caught by XMLReader aka the SAX parser.\n *\n * @param {string} message\n * @param {any?} locator Optional, can provide details about the location in the source\n * @constructor\n */\nfunction ParseError(message, locator) {\n\tthis.message = message\n\tthis.locator = locator\n\tif(Error.captureStackTrace) Error.captureStackTrace(this, ParseError);\n}\nParseError.prototype = new Error();\nParseError.prototype.name = ParseError.name\n\nfunction XMLReader(){\n\n}\n\nXMLReader.prototype = {\n\tparse:function(source,defaultNSMap,entityMap){\n\t\tvar domBuilder = this.domBuilder;\n\t\tdomBuilder.startDocument();\n\t\t_copy(defaultNSMap ,defaultNSMap = {})\n\t\tparse(source,defaultNSMap,entityMap,\n\t\t\t\tdomBuilder,this.errorHandler);\n\t\tdomBuilder.endDocument();\n\t}\n}\nfunction parse(source,defaultNSMapCopy,entityMap,domBuilder,errorHandler){\n\tfunction fixedFromCharCode(code) {\n\t\t// String.prototype.fromCharCode does not supports\n\t\t// > 2 bytes unicode chars directly\n\t\tif (code > 0xffff) {\n\t\t\tcode -= 0x10000;\n\t\t\tvar surrogate1 = 0xd800 + (code >> 10)\n\t\t\t\t, surrogate2 = 0xdc00 + (code & 0x3ff);\n\n\t\t\treturn String.fromCharCode(surrogate1, surrogate2);\n\t\t} else {\n\t\t\treturn String.fromCharCode(code);\n\t\t}\n\t}\n\tfunction entityReplacer(a){\n\t\tvar k = a.slice(1,-1);\n\t\tif (Object.hasOwnProperty.call(entityMap, k)) {\n\t\t\treturn entityMap[k];\n\t\t}else if(k.charAt(0) === '#'){\n\t\t\treturn fixedFromCharCode(parseInt(k.substr(1).replace('x','0x')))\n\t\t}else{\n\t\t\terrorHandler.error('entity not found:'+a);\n\t\t\treturn a;\n\t\t}\n\t}\n\tfunction appendText(end){//has some bugs\n\t\tif(end>start){\n\t\t\tvar xt = source.substring(start,end).replace(/&#?\\w+;/g,entityReplacer);\n\t\t\tlocator&&position(start);\n\t\t\tdomBuilder.characters(xt,0,end-start);\n\t\t\tstart = end\n\t\t}\n\t}\n\tfunction position(p,m){\n\t\twhile(p>=lineEnd && (m = linePattern.exec(source))){\n\t\t\tlineStart = m.index;\n\t\t\tlineEnd = lineStart + m[0].length;\n\t\t\tlocator.lineNumber++;\n\t\t\t//console.log('line++:',locator,startPos,endPos)\n\t\t}\n\t\tlocator.columnNumber = p-lineStart+1;\n\t}\n\tvar lineStart = 0;\n\tvar lineEnd = 0;\n\tvar linePattern = /.*(?:\\r\\n?|\\n)|.*$/g\n\tvar locator = domBuilder.locator;\n\n\tvar parseStack = [{currentNSMap:defaultNSMapCopy}]\n\tvar closeMap = {};\n\tvar start = 0;\n\twhile(true){\n\t\ttry{\n\t\t\tvar tagStart = source.indexOf('<',start);\n\t\t\tif(tagStart<0){\n\t\t\t\tif(!source.substr(start).match(/^\\s*$/)){\n\t\t\t\t\tvar doc = domBuilder.doc;\n\t \t\t\tvar text = doc.createTextNode(source.substr(start));\n\t \t\t\tdoc.appendChild(text);\n\t \t\t\tdomBuilder.currentElement = text;\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tif(tagStart>start){\n\t\t\t\tappendText(tagStart);\n\t\t\t}\n\t\t\tswitch(source.charAt(tagStart+1)){\n\t\t\tcase '/':\n\t\t\t\tvar end = source.indexOf('>',tagStart+3);\n\t\t\t\tvar tagName = source.substring(tagStart + 2, end).replace(/[ \\t\\n\\r]+$/g, '');\n\t\t\t\tvar config = parseStack.pop();\n\t\t\t\tif(end<0){\n\n\t \t\ttagName = source.substring(tagStart+2).replace(/[\\s<].*/,'');\n\t \t\terrorHandler.error(\"end tag name: \"+tagName+' is not complete:'+config.tagName);\n\t \t\tend = tagStart+1+tagName.length;\n\t \t}else if(tagName.match(/\\s\n\t\t\t\tlocator&&position(tagStart);\n\t\t\t\tend = 
parseInstruction(source,tagStart,domBuilder);\n\t\t\t\tbreak;\n\t\t\tcase '!':// start){\n\t\t\tstart = end;\n\t\t}else{\n\t\t\t//TODO: \u8FD9\u91CC\u6709\u53EF\u80FDsax\u56DE\u9000\uFF0C\u6709\u4F4D\u7F6E\u9519\u8BEF\u98CE\u9669\n\t\t\tappendText(Math.max(tagStart,start)+1);\n\t\t}\n\t}\n}\nfunction copyLocator(f,t){\n\tt.lineNumber = f.lineNumber;\n\tt.columnNumber = f.columnNumber;\n\treturn t;\n}\n\n/**\n * @see #appendElement(source,elStartEnd,el,selfClosed,entityReplacer,domBuilder,parseStack);\n * @return end of the elementStartPart(end of elementEndPart for selfClosed el)\n */\nfunction parseElementStartPart(source,start,el,currentNSMap,entityReplacer,errorHandler){\n\n\t/**\n\t * @param {string} qname\n\t * @param {string} value\n\t * @param {number} startIndex\n\t */\n\tfunction addAttribute(qname, value, startIndex) {\n\t\tif (el.attributeNames.hasOwnProperty(qname)) {\n\t\t\terrorHandler.fatalError('Attribute ' + qname + ' redefined')\n\t\t}\n\t\tel.addValue(\n\t\t\tqname,\n\t\t\t// @see https://www.w3.org/TR/xml/#AVNormalize\n\t\t\t// since the xmldom sax parser does not \"interpret\" DTD the following is not implemented:\n\t\t\t// - recursive replacement of (DTD) entity references\n\t\t\t// - trimming and collapsing multiple spaces into a single one for attributes that are not of type CDATA\n\t\t\tvalue.replace(/[\\t\\n\\r]/g, ' ').replace(/&#?\\w+;/g, entityReplacer),\n\t\t\tstartIndex\n\t\t)\n\t}\n\tvar attrName;\n\tvar value;\n\tvar p = ++start;\n\tvar s = S_TAG;//status\n\twhile(true){\n\t\tvar c = source.charAt(p);\n\t\tswitch(c){\n\t\tcase '=':\n\t\t\tif(s === S_ATTR){//attrName\n\t\t\t\tattrName = source.slice(start,p);\n\t\t\t\ts = S_EQ;\n\t\t\t}else if(s === S_ATTR_SPACE){\n\t\t\t\ts = S_EQ;\n\t\t\t}else{\n\t\t\t\t//fatalError: equal must after attrName or space after attrName\n\t\t\t\tthrow new Error('attribute equal must after attrName'); // No known test case\n\t\t\t}\n\t\t\tbreak;\n\t\tcase '\\'':\n\t\tcase '\"':\n\t\t\tif(s === S_EQ || s === S_ATTR //|| s == S_ATTR_SPACE\n\t\t\t\t){//equal\n\t\t\t\tif(s === S_ATTR){\n\t\t\t\t\terrorHandler.warning('attribute value must after \"=\"')\n\t\t\t\t\tattrName = source.slice(start,p)\n\t\t\t\t}\n\t\t\t\tstart = p+1;\n\t\t\t\tp = source.indexOf(c,start)\n\t\t\t\tif(p>0){\n\t\t\t\t\tvalue = source.slice(start, p);\n\t\t\t\t\taddAttribute(attrName, value, start-1);\n\t\t\t\t\ts = S_ATTR_END;\n\t\t\t\t}else{\n\t\t\t\t\t//fatalError: no end quot match\n\t\t\t\t\tthrow new Error('attribute value no end \\''+c+'\\' match');\n\t\t\t\t}\n\t\t\t}else if(s == S_ATTR_NOQUOT_VALUE){\n\t\t\t\tvalue = source.slice(start, p);\n\t\t\t\taddAttribute(attrName, value, start);\n\t\t\t\terrorHandler.warning('attribute \"'+attrName+'\" missed start quot('+c+')!!');\n\t\t\t\tstart = p+1;\n\t\t\t\ts = S_ATTR_END\n\t\t\t}else{\n\t\t\t\t//fatalError: no equal before\n\t\t\t\tthrow new Error('attribute value must after \"=\"'); // No known test case\n\t\t\t}\n\t\t\tbreak;\n\t\tcase '/':\n\t\t\tswitch(s){\n\t\t\tcase S_TAG:\n\t\t\t\tel.setTagName(source.slice(start,p));\n\t\t\tcase S_ATTR_END:\n\t\t\tcase S_TAG_SPACE:\n\t\t\tcase S_TAG_CLOSE:\n\t\t\t\ts =S_TAG_CLOSE;\n\t\t\t\tel.closed = true;\n\t\t\tcase S_ATTR_NOQUOT_VALUE:\n\t\t\tcase S_ATTR:\n\t\t\t\tbreak;\n\t\t\t\tcase S_ATTR_SPACE:\n\t\t\t\t\tel.closed = true;\n\t\t\t\tbreak;\n\t\t\t//case S_EQ:\n\t\t\tdefault:\n\t\t\t\tthrow new Error(\"attribute invalid close char('/')\") // No known test case\n\t\t\t}\n\t\t\tbreak;\n\t\tcase ''://end document\n\t\t\terrorHandler.error('unexpected end of 
input');\n\t\t\tif(s == S_TAG){\n\t\t\t\tel.setTagName(source.slice(start,p));\n\t\t\t}\n\t\t\treturn p;\n\t\tcase '>':\n\t\t\tswitch(s){\n\t\t\tcase S_TAG:\n\t\t\t\tel.setTagName(source.slice(start,p));\n\t\t\tcase S_ATTR_END:\n\t\t\tcase S_TAG_SPACE:\n\t\t\tcase S_TAG_CLOSE:\n\t\t\t\tbreak;//normal\n\t\t\tcase S_ATTR_NOQUOT_VALUE://Compatible state\n\t\t\tcase S_ATTR:\n\t\t\t\tvalue = source.slice(start,p);\n\t\t\t\tif(value.slice(-1) === '/'){\n\t\t\t\t\tel.closed = true;\n\t\t\t\t\tvalue = value.slice(0,-1)\n\t\t\t\t}\n\t\t\tcase S_ATTR_SPACE:\n\t\t\t\tif(s === S_ATTR_SPACE){\n\t\t\t\t\tvalue = attrName;\n\t\t\t\t}\n\t\t\t\tif(s == S_ATTR_NOQUOT_VALUE){\n\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed quot(\")!');\n\t\t\t\t\taddAttribute(attrName, value, start)\n\t\t\t\t}else{\n\t\t\t\t\tif(!NAMESPACE.isHTML(currentNSMap['']) || !value.match(/^(?:disabled|checked|selected)$/i)){\n\t\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed value!! \"'+value+'\" instead!!')\n\t\t\t\t\t}\n\t\t\t\t\taddAttribute(value, value, start)\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase S_EQ:\n\t\t\t\tthrow new Error('attribute value missed!!');\n\t\t\t}\n//\t\t\tconsole.log(tagName,tagNamePattern,tagNamePattern.test(tagName))\n\t\t\treturn p;\n\t\t/*xml space '\\x20' | #x9 | #xD | #xA; */\n\t\tcase '\\u0080':\n\t\t\tc = ' ';\n\t\tdefault:\n\t\t\tif(c<= ' '){//space\n\t\t\t\tswitch(s){\n\t\t\t\tcase S_TAG:\n\t\t\t\t\tel.setTagName(source.slice(start,p));//tagName\n\t\t\t\t\ts = S_TAG_SPACE;\n\t\t\t\t\tbreak;\n\t\t\t\tcase S_ATTR:\n\t\t\t\t\tattrName = source.slice(start,p)\n\t\t\t\t\ts = S_ATTR_SPACE;\n\t\t\t\t\tbreak;\n\t\t\t\tcase S_ATTR_NOQUOT_VALUE:\n\t\t\t\t\tvar value = source.slice(start, p);\n\t\t\t\t\terrorHandler.warning('attribute \"'+value+'\" missed quot(\")!!');\n\t\t\t\t\taddAttribute(attrName, value, start)\n\t\t\t\tcase S_ATTR_END:\n\t\t\t\t\ts = S_TAG_SPACE;\n\t\t\t\t\tbreak;\n\t\t\t\t//case S_TAG_SPACE:\n\t\t\t\t//case S_EQ:\n\t\t\t\t//case S_ATTR_SPACE:\n\t\t\t\t//\tvoid();break;\n\t\t\t\t//case S_TAG_CLOSE:\n\t\t\t\t\t//ignore warning\n\t\t\t\t}\n\t\t\t}else{//not space\n//S_TAG,\tS_ATTR,\tS_EQ,\tS_ATTR_NOQUOT_VALUE\n//S_ATTR_SPACE,\tS_ATTR_END,\tS_TAG_SPACE, S_TAG_CLOSE\n\t\t\t\tswitch(s){\n\t\t\t\t//case S_TAG:void();break;\n\t\t\t\t//case S_ATTR:void();break;\n\t\t\t\t//case S_ATTR_NOQUOT_VALUE:void();break;\n\t\t\t\tcase S_ATTR_SPACE:\n\t\t\t\t\tvar tagName = el.tagName;\n\t\t\t\t\tif (!NAMESPACE.isHTML(currentNSMap['']) || !attrName.match(/^(?:disabled|checked|selected)$/i)) {\n\t\t\t\t\t\terrorHandler.warning('attribute \"'+attrName+'\" missed value!! 
\"'+attrName+'\" instead2!!')\n\t\t\t\t\t}\n\t\t\t\t\taddAttribute(attrName, attrName, start);\n\t\t\t\t\tstart = p;\n\t\t\t\t\ts = S_ATTR;\n\t\t\t\t\tbreak;\n\t\t\t\tcase S_ATTR_END:\n\t\t\t\t\terrorHandler.warning('attribute space is required\"'+attrName+'\"!!')\n\t\t\t\tcase S_TAG_SPACE:\n\t\t\t\t\ts = S_ATTR;\n\t\t\t\t\tstart = p;\n\t\t\t\t\tbreak;\n\t\t\t\tcase S_EQ:\n\t\t\t\t\ts = S_ATTR_NOQUOT_VALUE;\n\t\t\t\t\tstart = p;\n\t\t\t\t\tbreak;\n\t\t\t\tcase S_TAG_CLOSE:\n\t\t\t\t\tthrow new Error(\"elements closed character '/' and '>' must be connected to\");\n\t\t\t\t}\n\t\t\t}\n\t\t}//end outer switch\n\t\t//console.log('p++',p)\n\t\tp++;\n\t}\n}\n/**\n * @return true if has new namespace define\n */\nfunction appendElement(el,domBuilder,currentNSMap){\n\tvar tagName = el.tagName;\n\tvar localNSMap = null;\n\t//var currentNSMap = parseStack[parseStack.length-1].currentNSMap;\n\tvar i = el.length;\n\twhile(i--){\n\t\tvar a = el[i];\n\t\tvar qName = a.qName;\n\t\tvar value = a.value;\n\t\tvar nsp = qName.indexOf(':');\n\t\tif(nsp>0){\n\t\t\tvar prefix = a.prefix = qName.slice(0,nsp);\n\t\t\tvar localName = qName.slice(nsp+1);\n\t\t\tvar nsPrefix = prefix === 'xmlns' && localName\n\t\t}else{\n\t\t\tlocalName = qName;\n\t\t\tprefix = null\n\t\t\tnsPrefix = qName === 'xmlns' && ''\n\t\t}\n\t\t//can not set prefix,because prefix !== ''\n\t\ta.localName = localName ;\n\t\t//prefix == null for no ns prefix attribute\n\t\tif(nsPrefix !== false){//hack!!\n\t\t\tif(localNSMap == null){\n\t\t\t\tlocalNSMap = {}\n\t\t\t\t//console.log(currentNSMap,0)\n\t\t\t\t_copy(currentNSMap,currentNSMap={})\n\t\t\t\t//console.log(currentNSMap,1)\n\t\t\t}\n\t\t\tcurrentNSMap[nsPrefix] = localNSMap[nsPrefix] = value;\n\t\t\ta.uri = NAMESPACE.XMLNS\n\t\t\tdomBuilder.startPrefixMapping(nsPrefix, value)\n\t\t}\n\t}\n\tvar i = el.length;\n\twhile(i--){\n\t\ta = el[i];\n\t\tvar prefix = a.prefix;\n\t\tif(prefix){//no prefix attribute has no namespace\n\t\t\tif(prefix === 'xml'){\n\t\t\t\ta.uri = NAMESPACE.XML;\n\t\t\t}if(prefix !== 'xmlns'){\n\t\t\t\ta.uri = currentNSMap[prefix || '']\n\n\t\t\t\t//{console.log('###'+a.qName,domBuilder.locator.systemId+'',currentNSMap,a.uri)}\n\t\t\t}\n\t\t}\n\t}\n\tvar nsp = tagName.indexOf(':');\n\tif(nsp>0){\n\t\tprefix = el.prefix = tagName.slice(0,nsp);\n\t\tlocalName = el.localName = tagName.slice(nsp+1);\n\t}else{\n\t\tprefix = null;//important!!\n\t\tlocalName = el.localName = tagName;\n\t}\n\t//no prefix element has default namespace\n\tvar ns = el.uri = currentNSMap[prefix || ''];\n\tdomBuilder.startElement(ns,localName,tagName,el);\n\t//endPrefixMapping and startPrefixMapping have not any help for dom builder\n\t//localNSMap = null\n\tif(el.closed){\n\t\tdomBuilder.endElement(ns,localName,tagName);\n\t\tif(localNSMap){\n\t\t\tfor (prefix in localNSMap) {\n\t\t\t\tif (Object.prototype.hasOwnProperty.call(localNSMap, prefix)) {\n\t\t\t\t\tdomBuilder.endPrefixMapping(prefix);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}else{\n\t\tel.currentNSMap = currentNSMap;\n\t\tel.localNSMap = localNSMap;\n\t\t//parseStack.push(el);\n\t\treturn true;\n\t}\n}\nfunction parseHtmlSpecialContent(source,elStartEnd,tagName,entityReplacer,domBuilder){\n\tif(/^(?:script|textarea)$/i.test(tagName)){\n\t\tvar elEndStart = source.indexOf('',elStartEnd);\n\t\tvar text = 
source.substring(elStartEnd+1,elEndStart);\n\t\tif(/[&<]/.test(text)){\n\t\t\tif(/^script$/i.test(tagName)){\n\t\t\t\t//if(!/\\]\\]>/.test(text)){\n\t\t\t\t\t//lexHandler.startCDATA();\n\t\t\t\t\tdomBuilder.characters(text,0,text.length);\n\t\t\t\t\t//lexHandler.endCDATA();\n\t\t\t\t\treturn elEndStart;\n\t\t\t\t//}\n\t\t\t}//}else{//text area\n\t\t\t\ttext = text.replace(/&#?\\w+;/g,entityReplacer);\n\t\t\t\tdomBuilder.characters(text,0,text.length);\n\t\t\t\treturn elEndStart;\n\t\t\t//}\n\n\t\t}\n\t}\n\treturn elStartEnd+1;\n}\nfunction fixSelfClosed(source,elStartEnd,tagName,closeMap){\n\t//if(tagName in closeMap){\n\tvar pos = closeMap[tagName];\n\tif(pos == null){\n\t\t//console.log(tagName)\n\t\tpos = source.lastIndexOf('')\n\t\tif(pos',start+4);\n\t\t\t//append comment source.substring(4,end)// ${end}. Duration (${end - start})\\n`;\n }\n return bufferedRangesStr;\n}\n\n/**\n * ranges\n *\n * Utilities for working with TimeRanges.\n *\n */\n\nconst TIME_FUDGE_FACTOR = 1 / 30; // Comparisons between time values such as current time and the end of the buffered range\n// can be misleading because of precision differences or when the current media has poorly\n// aligned audio and video, which can cause values to be slightly off from what you would\n// expect. This value is what we consider to be safe to use in such comparisons to account\n// for these scenarios.\n\nconst SAFE_TIME_DELTA = TIME_FUDGE_FACTOR * 3;\nconst filterRanges = function (timeRanges, predicate) {\n const results = [];\n let i;\n if (timeRanges && timeRanges.length) {\n // Search for ranges that match the predicate\n for (i = 0; i < timeRanges.length; i++) {\n if (predicate(timeRanges.start(i), timeRanges.end(i))) {\n results.push([timeRanges.start(i), timeRanges.end(i)]);\n }\n }\n }\n return createTimeRanges(results);\n};\n/**\n * Attempts to find the buffered TimeRange that contains the specified\n * time.\n *\n * @param {TimeRanges} buffered - the TimeRanges object to query\n * @param {number} time - the time to filter on.\n * @return {TimeRanges} a new TimeRanges object\n */\n\nconst findRange = function (buffered, time) {\n return filterRanges(buffered, function (start, end) {\n return start - SAFE_TIME_DELTA <= time && end + SAFE_TIME_DELTA >= time;\n });\n};\n/**\n * Returns the TimeRanges that begin later than the specified time.\n *\n * @param {TimeRanges} timeRanges - the TimeRanges object to query\n * @param {number} time - the time to filter on.\n * @return {TimeRanges} a new TimeRanges object.\n */\n\nconst findNextRange = function (timeRanges, time) {\n return filterRanges(timeRanges, function (start) {\n return start - TIME_FUDGE_FACTOR >= time;\n });\n};\n/**\n * Returns gaps within a list of TimeRanges\n *\n * @param {TimeRanges} buffered - the TimeRanges object\n * @return {TimeRanges} a TimeRanges object of gaps\n */\n\nconst findGaps = function (buffered) {\n if (buffered.length < 2) {\n return createTimeRanges();\n }\n const ranges = [];\n for (let i = 1; i < buffered.length; i++) {\n const start = buffered.end(i - 1);\n const end = buffered.start(i);\n ranges.push([start, end]);\n }\n return createTimeRanges(ranges);\n};\n/**\n * Calculate the intersection of two TimeRanges\n *\n * @param {TimeRanges} bufferA\n * @param {TimeRanges} bufferB\n * @return {TimeRanges} The interesection of `bufferA` with `bufferB`\n */\n\nconst bufferIntersection = function (bufferA, bufferB) {\n let start = null;\n let end = null;\n let arity = 0;\n const extents = [];\n const ranges = [];\n if (!bufferA || 
!bufferA.length || !bufferB || !bufferB.length) {\n return createTimeRanges();\n } // Handle the case where we have both buffers and create an\n // intersection of the two\n\n let count = bufferA.length; // A) Gather up all start and end times\n\n while (count--) {\n extents.push({\n time: bufferA.start(count),\n type: 'start'\n });\n extents.push({\n time: bufferA.end(count),\n type: 'end'\n });\n }\n count = bufferB.length;\n while (count--) {\n extents.push({\n time: bufferB.start(count),\n type: 'start'\n });\n extents.push({\n time: bufferB.end(count),\n type: 'end'\n });\n } // B) Sort them by time\n\n extents.sort(function (a, b) {\n return a.time - b.time;\n }); // C) Go along one by one incrementing arity for start and decrementing\n // arity for ends\n\n for (count = 0; count < extents.length; count++) {\n if (extents[count].type === 'start') {\n arity++; // D) If arity is ever incremented to 2 we are entering an\n // overlapping range\n\n if (arity === 2) {\n start = extents[count].time;\n }\n } else if (extents[count].type === 'end') {\n arity--; // E) If arity is ever decremented to 1 we leaving an\n // overlapping range\n\n if (arity === 1) {\n end = extents[count].time;\n }\n } // F) Record overlapping ranges\n\n if (start !== null && end !== null) {\n ranges.push([start, end]);\n start = null;\n end = null;\n }\n }\n return createTimeRanges(ranges);\n};\n/**\n * Gets a human readable string for a TimeRange\n *\n * @param {TimeRange} range\n * @return {string} a human readable string\n */\n\nconst printableRange = range => {\n const strArr = [];\n if (!range || !range.length) {\n return '';\n }\n for (let i = 0; i < range.length; i++) {\n strArr.push(range.start(i) + ' => ' + range.end(i));\n }\n return strArr.join(', ');\n};\n/**\n * Calculates the amount of time left in seconds until the player hits the end of the\n * buffer and causes a rebuffer\n *\n * @param {TimeRange} buffered\n * The state of the buffer\n * @param {Numnber} currentTime\n * The current time of the player\n * @param {number} playbackRate\n * The current playback rate of the player. Defaults to 1.\n * @return {number}\n * Time until the player has to start rebuffering in seconds.\n * @function timeUntilRebuffer\n */\n\nconst timeUntilRebuffer = function (buffered, currentTime, playbackRate = 1) {\n const bufferedEnd = buffered.length ? 
buffered.end(buffered.length - 1) : 0;\n  return (bufferedEnd - currentTime) / playbackRate;\n};\n/**\n * Converts a TimeRanges object into an array representation\n *\n * @param {TimeRanges} timeRanges\n * @return {Array}\n */\n\nconst timeRangesToArray = timeRanges => {\n  const timeRangesList = [];\n  for (let i = 0; i < timeRanges.length; i++) {\n    timeRangesList.push({\n      start: timeRanges.start(i),\n      end: timeRanges.end(i)\n    });\n  }\n  return timeRangesList;\n};\n/**\n * Determines if two time range objects are different.\n *\n * @param {TimeRange} a\n *        the first time range object to check\n *\n * @param {TimeRange} b\n *        the second time range object to check\n *\n * @return {Boolean}\n *         Whether the time range objects differ\n */\n\nconst isRangeDifferent = function (a, b) {\n  // same object\n  if (a === b) {\n    return false;\n  } // one or the other is undefined\n\n  if (!a && b || !b && a) {\n    return true;\n  } // length is different\n\n  if (a.length !== b.length) {\n    return true;\n  } // see if any start/end pair is different\n\n  for (let i = 0; i < a.length; i++) {\n    if (a.start(i) !== b.start(i) || a.end(i) !== b.end(i)) {\n      return true;\n    }\n  } // if the length and every pair is the same\n  // this is the same time range\n\n  return false;\n};\nconst lastBufferedEnd = function (a) {\n  if (!a || !a.length || !a.end) {\n    return;\n  }\n  return a.end(a.length - 1);\n};\n/**\n * A utility function to add up the amount of time in a timeRange\n * after a specified startTime.\n * e.g. [[0, 10], [20, 40], [50, 60]] with a startTime 0\n * would return 40 as there are 40 seconds after 0 in the timeRange\n *\n * @param {TimeRange} range\n *        The range to check against\n * @param {number} startTime\n *        The time in the time range that you should start counting from\n *\n * @return {number}\n *          The number of seconds in the buffer past the specified time.\n */\n\nconst timeAheadOf = function (range, startTime) {\n  let time = 0;\n  if (!range || !range.length) {\n    return time;\n  }\n  for (let i = 0; i < range.length; i++) {\n    const start = range.start(i);\n    const end = range.end(i); // startTime is after this range entirely\n\n    if (startTime > end) {\n      continue;\n    } // startTime is within this range\n\n    if (startTime > start && startTime <= end) {\n      time += end - startTime;\n      continue;\n    } // startTime is before this range.\n\n    time += end - start;\n  }\n  return time;\n};\n\n/**\n * @file playlist.js\n *\n * Playlist related utilities.\n */\n/**\n * Get the duration of a segment, with special cases for\n * llhls segments that do not have a duration yet.\n *\n * @param {Object} playlist\n *        the playlist that the segment belongs to.\n * @param {Object} segment\n *        the segment to get a duration for.\n *\n * @return {number}\n *          the segment duration\n */\n\nconst segmentDurationWithParts = (playlist, segment) => {\n  // if this isn't a preload segment\n  // then we will have a segment duration that is accurate.\n  if (!segment.preload) {\n    return segment.duration;\n  } // otherwise we have to add up parts and preload hints\n  // to get an up to date duration.\n\n  let result = 0;\n  (segment.parts || []).forEach(function (p) {\n    result += p.duration;\n  }); // for preload hints we have to use partTargetDuration\n  // as they won't even have a duration yet.\n\n  (segment.preloadHints || []).forEach(function (p) {\n    if (p.type === 'PART') {\n      result += playlist.partTargetDuration;\n    }\n  });\n  return result;\n};\n/**\n * A function to get a combined list of parts and segments with durations\n * and indexes.\n *\n * @param 
{Playlist} playlist the playlist to get the list for.\n *\n * @return {Array} The part/segment list.\n */\n\nconst getPartsAndSegments = playlist => (playlist.segments || []).reduce((acc, segment, si) => {\n if (segment.parts) {\n segment.parts.forEach(function (part, pi) {\n acc.push({\n duration: part.duration,\n segmentIndex: si,\n partIndex: pi,\n part,\n segment\n });\n });\n } else {\n acc.push({\n duration: segment.duration,\n segmentIndex: si,\n partIndex: null,\n segment,\n part: null\n });\n }\n return acc;\n}, []);\nconst getLastParts = media => {\n const lastSegment = media.segments && media.segments.length && media.segments[media.segments.length - 1];\n return lastSegment && lastSegment.parts || [];\n};\nconst getKnownPartCount = ({\n preloadSegment\n}) => {\n if (!preloadSegment) {\n return;\n }\n const {\n parts,\n preloadHints\n } = preloadSegment;\n let partCount = (preloadHints || []).reduce((count, hint) => count + (hint.type === 'PART' ? 1 : 0), 0);\n partCount += parts && parts.length ? parts.length : 0;\n return partCount;\n};\n/**\n * Get the number of seconds to delay from the end of a\n * live playlist.\n *\n * @param {Playlist} main the main playlist\n * @param {Playlist} media the media playlist\n * @return {number} the hold back in seconds.\n */\n\nconst liveEdgeDelay = (main, media) => {\n if (media.endList) {\n return 0;\n } // dash suggestedPresentationDelay trumps everything\n\n if (main && main.suggestedPresentationDelay) {\n return main.suggestedPresentationDelay;\n }\n const hasParts = getLastParts(media).length > 0; // look for \"part\" delays from ll-hls first\n\n if (hasParts && media.serverControl && media.serverControl.partHoldBack) {\n return media.serverControl.partHoldBack;\n } else if (hasParts && media.partTargetDuration) {\n return media.partTargetDuration * 3; // finally look for full segment delays\n } else if (media.serverControl && media.serverControl.holdBack) {\n return media.serverControl.holdBack;\n } else if (media.targetDuration) {\n return media.targetDuration * 3;\n }\n return 0;\n};\n/**\n * walk backward until we find a duration we can use\n * or return a failure\n *\n * @param {Playlist} playlist the playlist to walk through\n * @param {Number} endSequence the mediaSequence to stop walking on\n */\n\nconst backwardDuration = function (playlist, endSequence) {\n let result = 0;\n let i = endSequence - playlist.mediaSequence; // if a start time is available for segment immediately following\n // the interval, use it\n\n let segment = playlist.segments[i]; // Walk backward until we find the latest segment with timeline\n // information that is earlier than endSequence\n\n if (segment) {\n if (typeof segment.start !== 'undefined') {\n return {\n result: segment.start,\n precise: true\n };\n }\n if (typeof segment.end !== 'undefined') {\n return {\n result: segment.end - segment.duration,\n precise: true\n };\n }\n }\n while (i--) {\n segment = playlist.segments[i];\n if (typeof segment.end !== 'undefined') {\n return {\n result: result + segment.end,\n precise: true\n };\n }\n result += segmentDurationWithParts(playlist, segment);\n if (typeof segment.start !== 'undefined') {\n return {\n result: result + segment.start,\n precise: true\n };\n }\n }\n return {\n result,\n precise: false\n };\n};\n/**\n * walk forward until we find a duration we can use\n * or return a failure\n *\n * @param {Playlist} playlist the playlist to walk through\n * @param {number} endSequence the mediaSequence to stop walking on\n */\n\nconst 
forwardDuration = function (playlist, endSequence) {\n  let result = 0;\n  let segment;\n  let i = endSequence - playlist.mediaSequence; // Walk forward until we find the earliest segment with timeline\n  // information\n\n  for (; i < playlist.segments.length; i++) {\n    segment = playlist.segments[i];\n    if (typeof segment.start !== 'undefined') {\n      return {\n        result: segment.start - result,\n        precise: true\n      };\n    }\n    result += segmentDurationWithParts(playlist, segment);\n    if (typeof segment.end !== 'undefined') {\n      return {\n        result: segment.end - result,\n        precise: true\n      };\n    }\n  } // indicate we didn't find a useful duration estimate\n\n  return {\n    result: -1,\n    precise: false\n  };\n};\n/**\n * Calculate the media duration from the segments associated with a\n * playlist. The duration of a subinterval of the available segments\n * may be calculated by specifying an end index.\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} endSequence an exclusive upper boundary\n * for the playlist.  Defaults to playlist length.\n * @param {number} expired the amount of time that has dropped\n * off the front of the playlist in a live scenario\n * @return {number} the duration between the first available segment\n * and end index.\n */\n\nconst intervalDuration = function (playlist, endSequence, expired) {\n  if (typeof endSequence === 'undefined') {\n    endSequence = playlist.mediaSequence + playlist.segments.length;\n  }\n  if (endSequence < playlist.mediaSequence) {\n    return 0;\n  } // do a backward walk to estimate the duration\n\n  const backward = backwardDuration(playlist, endSequence);\n  if (backward.precise) {\n    // if we were able to base our duration estimate on timing\n    // information provided directly from the Media Source, return\n    // it\n    return backward.result;\n  } // walk forward to see if a precise duration estimate can be made\n  // that way\n\n  const forward = forwardDuration(playlist, endSequence);\n  if (forward.precise) {\n    // we found a segment that has been buffered and so its\n    // position is known precisely\n    return forward.result;\n  } // return the less-precise, playlist-based duration estimate\n\n  return backward.result + expired;\n};\n/**\n * Calculates the duration of a playlist. If a start and end index\n * are specified, the duration will be for the subset of the media\n * timeline between those two indices. The total duration for live\n * playlists is always Infinity.\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} endSequence an exclusive upper\n * boundary for the playlist. 
Defaults to the playlist media\n * sequence number plus its length.\n * @param {number=} expired the amount of time that has\n * dropped off the front of the playlist in a live scenario\n * @return {number} the duration between the start index and end\n * index.\n */\n\nconst duration = function (playlist, endSequence, expired) {\n  if (!playlist) {\n    return 0;\n  }\n  if (typeof expired !== 'number') {\n    expired = 0;\n  } // if a slice of the total duration is not requested, use\n  // playlist-level duration indicators when they're present\n\n  if (typeof endSequence === 'undefined') {\n    // if present, use the duration specified in the playlist\n    if (playlist.totalDuration) {\n      return playlist.totalDuration;\n    } // duration should be Infinity for live playlists\n\n    if (!playlist.endList) {\n      return window$1.Infinity;\n    }\n  } // calculate the total duration based on the segment durations\n\n  return intervalDuration(playlist, endSequence, expired);\n};\n/**\n * Calculate the time between two indexes in the current playlist.\n * Neither the start- nor the end-index needs to be within the current\n * playlist, in which case the targetDuration of the playlist is used\n * to approximate the durations of the segments\n *\n * @param {Array} options.durationList list to iterate over for durations.\n * @param {number} options.defaultDuration duration to use for elements before or after the durationList\n * @param {number} options.startIndex partsAndSegments index to start\n * @param {number} options.endIndex partsAndSegments index to end.\n * @return {number} the number of seconds between startIndex and endIndex\n */\n\nconst sumDurations = function ({\n  defaultDuration,\n  durationList,\n  startIndex,\n  endIndex\n}) {\n  let durations = 0;\n  if (startIndex > endIndex) {\n    [startIndex, endIndex] = [endIndex, startIndex];\n  }\n  if (startIndex < 0) {\n    for (let i = startIndex; i < Math.min(0, endIndex); i++) {\n      durations += defaultDuration;\n    }\n    startIndex = 0;\n  }\n  for (let i = startIndex; i < endIndex; i++) {\n    durations += durationList[i].duration;\n  }\n  return durations;\n};\n/**\n * Calculates the playlist end time\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} expired the amount of time that has\n * dropped off the front of the playlist in a live scenario\n * @param {boolean} useSafeLiveEnd a boolean value indicating whether or not the\n * playlist end calculation should consider the safe live end\n * (truncate the playlist end by three segments). 
This is normally\n * used for calculating the end of the playlist's seekable range.\n * This takes into account the value of liveEdgePadding.\n * Setting liveEdgePadding to 0 is equivalent to setting this to false.\n * @param {number} liveEdgePadding a number indicating how far from the end of the playlist we should be in seconds.\n * If this is provided, it is used in the safe live end calculation.\n * Setting useSafeLiveEnd=false and liveEdgePadding=0 are equivalent.\n * Corresponds to suggestedPresentationDelay in DASH manifests.\n * @return {number} the end time of playlist\n * @function playlistEnd\n */\n\nconst playlistEnd = function (playlist, expired, useSafeLiveEnd, liveEdgePadding) {\n  if (!playlist || !playlist.segments) {\n    return null;\n  }\n  if (playlist.endList) {\n    return duration(playlist);\n  }\n  if (expired === null) {\n    return null;\n  }\n  expired = expired || 0;\n  let lastSegmentEndTime = intervalDuration(playlist, playlist.mediaSequence + playlist.segments.length, expired);\n  if (useSafeLiveEnd) {\n    liveEdgePadding = typeof liveEdgePadding === 'number' ? liveEdgePadding : liveEdgeDelay(null, playlist);\n    lastSegmentEndTime -= liveEdgePadding;\n  } // don't return a time less than zero\n\n  return Math.max(0, lastSegmentEndTime);\n};\n/**\n * Calculates the interval of time that is currently seekable in a\n * playlist. The returned time ranges are relative to the earliest\n * moment in the specified playlist that is still available. A full\n * seekable implementation for live streams would need to offset\n * these values by the duration of content that has expired from the\n * stream.\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} expired the amount of time that has\n * dropped off the front of the playlist in a live scenario\n * @param {number} liveEdgePadding how far from the end of the playlist we should be in seconds.\n * Corresponds to suggestedPresentationDelay in DASH manifests.\n * @return {TimeRanges} the periods of time that are valid targets\n * for seeking\n */\n\nconst seekable = function (playlist, expired, liveEdgePadding) {\n  const useSafeLiveEnd = true;\n  const seekableStart = expired || 0;\n  let seekableEnd = playlistEnd(playlist, expired, useSafeLiveEnd, liveEdgePadding);\n  if (seekableEnd === null) {\n    return createTimeRanges();\n  } // Clamp seekable end since it can not be less than the seekable start\n\n  if (seekableEnd < seekableStart) {\n    seekableEnd = seekableStart;\n  }\n  return createTimeRanges(seekableStart, seekableEnd);\n};\n/**\n * Determine the index and estimated starting time of the segment that\n * contains a specified playback position in a media playlist.\n *\n * @param {Object} options.playlist the media playlist to query\n * @param {number} options.currentTime The number of seconds since the earliest\n * possible position to determine the containing segment for\n * @param {number} options.startTime the time when the segment/part starts\n * @param {number} options.startingSegmentIndex the segment index to start looking at.\n * @param {number?} [options.startingPartIndex] the part index to look at within the segment.\n *\n * @return {Object} an object with partIndex, segmentIndex, and startTime.\n */\n\nconst getMediaInfoForTime = function ({\n  playlist,\n  currentTime,\n  startingSegmentIndex,\n  startingPartIndex,\n  startTime,\n  exactManifestTimings\n}) {\n  let time = currentTime - startTime;\n  const partsAndSegments = 
getPartsAndSegments(playlist);\n  let startIndex = 0;\n  for (let i = 0; i < partsAndSegments.length; i++) {\n    const partAndSegment = partsAndSegments[i];\n    if (startingSegmentIndex !== partAndSegment.segmentIndex) {\n      continue;\n    } // skip this if part index does not match.\n\n    if (typeof startingPartIndex === 'number' && typeof partAndSegment.partIndex === 'number' && startingPartIndex !== partAndSegment.partIndex) {\n      continue;\n    }\n    startIndex = i;\n    break;\n  }\n  if (time < 0) {\n    // Walk backward from startIndex in the playlist, adding durations\n    // until we find a segment that contains `time` and return it\n    if (startIndex > 0) {\n      for (let i = startIndex - 1; i >= 0; i--) {\n        const partAndSegment = partsAndSegments[i];\n        time += partAndSegment.duration;\n        if (exactManifestTimings) {\n          if (time < 0) {\n            continue;\n          }\n        } else if (time + TIME_FUDGE_FACTOR <= 0) {\n          continue;\n        }\n        return {\n          partIndex: partAndSegment.partIndex,\n          segmentIndex: partAndSegment.segmentIndex,\n          startTime: startTime - sumDurations({\n            defaultDuration: playlist.targetDuration,\n            durationList: partsAndSegments,\n            startIndex,\n            endIndex: i\n          })\n        };\n      }\n    } // We were unable to find a good segment within the playlist\n    // so select the first segment\n\n    return {\n      partIndex: partsAndSegments[0] && partsAndSegments[0].partIndex || null,\n      segmentIndex: partsAndSegments[0] && partsAndSegments[0].segmentIndex || 0,\n      startTime: currentTime\n    };\n  } // When startIndex is negative, we first walk forward to first segment\n  // adding target durations. If we \"run out of time\" before getting to\n  // the first segment, return the first segment\n\n  if (startIndex < 0) {\n    for (let i = startIndex; i < 0; i++) {\n      time -= playlist.targetDuration;\n      if (time < 0) {\n        return {\n          partIndex: partsAndSegments[0] && partsAndSegments[0].partIndex || null,\n          segmentIndex: partsAndSegments[0] && partsAndSegments[0].segmentIndex || 0,\n          startTime: currentTime\n        };\n      }\n    }\n    startIndex = 0;\n  } // Walk forward from startIndex in the playlist, subtracting durations\n  // until we find a segment that contains `time` and return it\n\n  for (let i = startIndex; i < partsAndSegments.length; i++) {\n    const partAndSegment = partsAndSegments[i];\n    time -= partAndSegment.duration;\n    const canUseFudgeFactor = partAndSegment.duration > TIME_FUDGE_FACTOR;\n    const isExactlyAtTheEnd = time === 0;\n    const isExtremelyCloseToTheEnd = canUseFudgeFactor && time + TIME_FUDGE_FACTOR >= 0;\n    if (isExactlyAtTheEnd || isExtremelyCloseToTheEnd) {\n      // 1) We are exactly at the end of the current segment.\n      // 2) We are extremely close to the end of the current segment (The difference is less than 1 / 30).\n      //    We may encounter this situation when\n      //    we don't have an exact match between segment duration info in the manifest and the actual duration of the segment\n      //    For example:\n      //    We appended 3 segments 10 seconds each, meaning we should have 30 sec buffered,\n      //    but the actual buffered is 29.99999\n      //\n      //    In both cases:\n      //    if we passed current time -> it means that we already played current segment\n      //    if we passed buffered.end -> it means that this segment is already loaded and buffered\n      //    we should select the next segment if we have one:\n      if (i !== partsAndSegments.length - 1) {\n        continue;\n      }\n    }\n    if (exactManifestTimings) {\n      if (time > 0) {\n        continue;\n      }\n    } else if (time - TIME_FUDGE_FACTOR >= 0) {\n      continue;\n    }\n    return {\n      partIndex: partAndSegment.partIndex,\n      segmentIndex: partAndSegment.segmentIndex,\n      startTime: startTime + 
sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: partsAndSegments,\n startIndex,\n endIndex: i\n })\n };\n } // We are out of possible candidates so load the last one...\n\n return {\n segmentIndex: partsAndSegments[partsAndSegments.length - 1].segmentIndex,\n partIndex: partsAndSegments[partsAndSegments.length - 1].partIndex,\n startTime: currentTime\n };\n};\n/**\n * Check whether the playlist is excluded or not.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is excluded or not\n * @function isExcluded\n */\n\nconst isExcluded = function (playlist) {\n return playlist.excludeUntil && playlist.excludeUntil > Date.now();\n};\n/**\n * Check whether the playlist is compatible with current playback configuration or has\n * been excluded permanently for being incompatible.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is incompatible or not\n * @function isIncompatible\n */\n\nconst isIncompatible = function (playlist) {\n return playlist.excludeUntil && playlist.excludeUntil === Infinity;\n};\n/**\n * Check whether the playlist is enabled or not.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is enabled or not\n * @function isEnabled\n */\n\nconst isEnabled = function (playlist) {\n const excluded = isExcluded(playlist);\n return !playlist.disabled && !excluded;\n};\n/**\n * Check whether the playlist has been manually disabled through the representations api.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is disabled manually or not\n * @function isDisabled\n */\n\nconst isDisabled = function (playlist) {\n return playlist.disabled;\n};\n/**\n * Returns whether the current playlist is an AES encrypted HLS stream\n *\n * @return {boolean} true if it's an AES encrypted HLS stream\n */\n\nconst isAes = function (media) {\n for (let i = 0; i < media.segments.length; i++) {\n if (media.segments[i].key) {\n return true;\n }\n }\n return false;\n};\n/**\n * Checks if the playlist has a value for the specified attribute\n *\n * @param {string} attr\n * Attribute to check for\n * @param {Object} playlist\n * The media playlist object\n * @return {boolean}\n * Whether the playlist contains a value for the attribute or not\n * @function hasAttribute\n */\n\nconst hasAttribute = function (attr, playlist) {\n return playlist.attributes && playlist.attributes[attr];\n};\n/**\n * Estimates the time required to complete a segment download from the specified playlist\n *\n * @param {number} segmentDuration\n * Duration of requested segment\n * @param {number} bandwidth\n * Current measured bandwidth of the player\n * @param {Object} playlist\n * The media playlist object\n * @param {number=} bytesReceived\n * Number of bytes already received for the request. Defaults to 0\n * @return {number|NaN}\n * The estimated time to request the segment. 
NaN if bandwidth information for\n *         the given playlist is unavailable\n * @function estimateSegmentRequestTime\n */\n\nconst estimateSegmentRequestTime = function (segmentDuration, bandwidth, playlist, bytesReceived = 0) {\n  if (!hasAttribute('BANDWIDTH', playlist)) {\n    return NaN;\n  }\n  const size = segmentDuration * playlist.attributes.BANDWIDTH;\n  return (size - bytesReceived * 8) / bandwidth;\n};\n/*\n * Returns whether the current playlist is the lowest rendition\n *\n * @return {Boolean} true if on lowest rendition\n */\n\nconst isLowestEnabledRendition = (main, media) => {\n  if (main.playlists.length === 1) {\n    return true;\n  }\n  const currentBandwidth = media.attributes.BANDWIDTH || Number.MAX_VALUE;\n  return main.playlists.filter(playlist => {\n    if (!isEnabled(playlist)) {\n      return false;\n    }\n    return (playlist.attributes.BANDWIDTH || 0) < currentBandwidth;\n  }).length === 0;\n};\nconst playlistMatch = (a, b) => {\n  // both playlists are null\n  // or only one playlist is non-null\n  // no match\n  if (!a && !b || !a && b || a && !b) {\n    return false;\n  } // playlist objects are the same, match\n\n  if (a === b) {\n    return true;\n  } // first try to use id as it should be the most\n  // accurate\n\n  if (a.id && b.id && a.id === b.id) {\n    return true;\n  } // next try to use resolvedUri as it should be the\n  // second most accurate.\n\n  if (a.resolvedUri && b.resolvedUri && a.resolvedUri === b.resolvedUri) {\n    return true;\n  } // finally try to use uri as it should be accurate\n  // but might miss a few cases for relative uris\n\n  if (a.uri && b.uri && a.uri === b.uri) {\n    return true;\n  }\n  return false;\n};\nconst someAudioVariant = function (main, callback) {\n  const AUDIO = main && main.mediaGroups && main.mediaGroups.AUDIO || {};\n  let found = false;\n  for (const groupName in AUDIO) {\n    for (const label in AUDIO[groupName]) {\n      found = callback(AUDIO[groupName][label]);\n      if (found) {\n        break;\n      }\n    }\n    if (found) {\n      break;\n    }\n  }\n  return !!found;\n};\nconst isAudioOnly = main => {\n  // we are audio only if we have no main playlists but do\n  // have media group playlists.\n  if (!main || !main.playlists || !main.playlists.length) {\n    // without audio variants or playlists this\n    // is not an audio only main.\n    const found = someAudioVariant(main, variant => variant.playlists && variant.playlists.length || variant.uri);\n    return found;\n  } // if every playlist has only an audio codec it is audio only\n\n  for (let i = 0; i < main.playlists.length; i++) {\n    const playlist = main.playlists[i];\n    const CODECS = playlist.attributes && playlist.attributes.CODECS; // all codecs are audio, this is an audio playlist.\n\n    if (CODECS && CODECS.split(',').every(c => isAudioCodec(c))) {\n      continue;\n    } // playlist is in an audio group it is audio only\n\n    const found = someAudioVariant(main, variant => playlistMatch(playlist, variant));\n    if (found) {\n      continue;\n    } // if we make it here this playlist isn't audio and we\n    // are not audio only\n\n    return false;\n  } // if we make it past every playlist without returning, then\n  // this is an audio only playlist.\n\n  return true;\n}; // exports\n\nvar Playlist = {\n  liveEdgeDelay,\n  duration,\n  seekable,\n  getMediaInfoForTime,\n  isEnabled,\n  isDisabled,\n  isExcluded,\n  isIncompatible,\n  playlistEnd,\n  isAes,\n  hasAttribute,\n  estimateSegmentRequestTime,\n  isLowestEnabledRendition,\n  isAudioOnly,\n  playlistMatch,\n  segmentDurationWithParts\n};\nconst {\n  log\n} = videojs;\nconst createPlaylistID = (index, uri) => {\n  return `${index}-${uri}`;\n}; 
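// A brief usage sketch (hypothetical values, derived directly from the two ID helpers\n// around this point): createPlaylistID(0, 'media.m3u8') returns '0-media.m3u8', and the\n// default groupID('AUDIO', 'main', 'en') defined just below returns\n// 'placeholder-uri-AUDIO-main-en'.\n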
// default function for creating a group id\n\nconst groupID = (type, group, label) => {\n return `placeholder-uri-${type}-${group}-${label}`;\n};\n/**\n * Parses a given m3u8 playlist\n *\n * @param {Function} [onwarn]\n * a function to call when the parser triggers a warning event.\n * @param {Function} [oninfo]\n * a function to call when the parser triggers an info event.\n * @param {string} manifestString\n * The downloaded manifest string\n * @param {Object[]} [customTagParsers]\n * An array of custom tag parsers for the m3u8-parser instance\n * @param {Object[]} [customTagMappers]\n * An array of custom tag mappers for the m3u8-parser instance\n * @param {boolean} [llhls]\n * Whether to keep ll-hls features in the manifest after parsing.\n * @return {Object}\n * The manifest object\n */\n\nconst parseManifest = ({\n onwarn,\n oninfo,\n manifestString,\n customTagParsers = [],\n customTagMappers = [],\n llhls\n}) => {\n const parser = new Parser();\n if (onwarn) {\n parser.on('warn', onwarn);\n }\n if (oninfo) {\n parser.on('info', oninfo);\n }\n customTagParsers.forEach(customParser => parser.addParser(customParser));\n customTagMappers.forEach(mapper => parser.addTagMapper(mapper));\n parser.push(manifestString);\n parser.end();\n const manifest = parser.manifest; // remove llhls features from the parsed manifest\n // if we don't want llhls support.\n\n if (!llhls) {\n ['preloadSegment', 'skip', 'serverControl', 'renditionReports', 'partInf', 'partTargetDuration'].forEach(function (k) {\n if (manifest.hasOwnProperty(k)) {\n delete manifest[k];\n }\n });\n if (manifest.segments) {\n manifest.segments.forEach(function (segment) {\n ['parts', 'preloadHints'].forEach(function (k) {\n if (segment.hasOwnProperty(k)) {\n delete segment[k];\n }\n });\n });\n }\n }\n if (!manifest.targetDuration) {\n let targetDuration = 10;\n if (manifest.segments && manifest.segments.length) {\n targetDuration = manifest.segments.reduce((acc, s) => Math.max(acc, s.duration), 0);\n }\n if (onwarn) {\n onwarn({\n message: `manifest has no targetDuration defaulting to ${targetDuration}`\n });\n }\n manifest.targetDuration = targetDuration;\n }\n const parts = getLastParts(manifest);\n if (parts.length && !manifest.partTargetDuration) {\n const partTargetDuration = parts.reduce((acc, p) => Math.max(acc, p.duration), 0);\n if (onwarn) {\n onwarn({\n message: `manifest has no partTargetDuration defaulting to ${partTargetDuration}`\n });\n log.error('LL-HLS manifest has parts but lacks required #EXT-X-PART-INF:PART-TARGET value. See https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis-09#section-4.4.3.7. 
Playback is not guaranteed.');\n }\n manifest.partTargetDuration = partTargetDuration;\n }\n return manifest;\n};\n/**\n * Loops through all supported media groups in main and calls the provided\n * callback for each group\n *\n * @param {Object} main\n * The parsed main manifest object\n * @param {Function} callback\n * Callback to call for each media group\n */\n\nconst forEachMediaGroup = (main, callback) => {\n if (!main.mediaGroups) {\n return;\n }\n ['AUDIO', 'SUBTITLES'].forEach(mediaType => {\n if (!main.mediaGroups[mediaType]) {\n return;\n }\n for (const groupKey in main.mediaGroups[mediaType]) {\n for (const labelKey in main.mediaGroups[mediaType][groupKey]) {\n const mediaProperties = main.mediaGroups[mediaType][groupKey][labelKey];\n callback(mediaProperties, mediaType, groupKey, labelKey);\n }\n }\n });\n};\n/**\n * Adds properties and attributes to the playlist to keep consistent functionality for\n * playlists throughout VHS.\n *\n * @param {Object} config\n * Arguments object\n * @param {Object} config.playlist\n * The media playlist\n * @param {string} [config.uri]\n * The uri to the media playlist (if media playlist is not from within a main\n * playlist)\n * @param {string} id\n * ID to use for the playlist\n */\n\nconst setupMediaPlaylist = ({\n playlist,\n uri,\n id\n}) => {\n playlist.id = id;\n playlist.playlistErrors_ = 0;\n if (uri) {\n // For media playlists, m3u8-parser does not have access to a URI, as HLS media\n // playlists do not contain their own source URI, but one is needed for consistency in\n // VHS.\n playlist.uri = uri;\n } // For HLS main playlists, even though certain attributes MUST be defined, the\n // stream may still be played without them.\n // For HLS media playlists, m3u8-parser does not attach an attributes object to the\n // manifest.\n //\n // To avoid undefined reference errors through the project, and make the code easier\n // to write/read, add an empty attributes object for these cases.\n\n playlist.attributes = playlist.attributes || {};\n};\n/**\n * Adds ID, resolvedUri, and attributes properties to each playlist of the main, where\n * necessary. In addition, creates playlist IDs for each playlist and adds playlist ID to\n * playlist references to the playlists array.\n *\n * @param {Object} main\n * The main playlist\n */\n\nconst setupMediaPlaylists = main => {\n let i = main.playlists.length;\n while (i--) {\n const playlist = main.playlists[i];\n setupMediaPlaylist({\n playlist,\n id: createPlaylistID(i, playlist.uri)\n });\n playlist.resolvedUri = resolveUrl(main.uri, playlist.uri);\n main.playlists[playlist.id] = playlist; // URI reference added for backwards compatibility\n\n main.playlists[playlist.uri] = playlist; // Although the spec states an #EXT-X-STREAM-INF tag MUST have a BANDWIDTH attribute,\n // the stream can be played without it. Although an attributes property may have been\n // added to the playlist to prevent undefined references, issue a warning to fix the\n // manifest.\n\n if (!playlist.attributes.BANDWIDTH) {\n log.warn('Invalid playlist STREAM-INF detected. 
Missing BANDWIDTH attribute.');\n }\n }\n};\n/**\n * Adds resolvedUri properties to each media group.\n *\n * @param {Object} main\n * The main playlist\n */\n\nconst resolveMediaGroupUris = main => {\n forEachMediaGroup(main, properties => {\n if (properties.uri) {\n properties.resolvedUri = resolveUrl(main.uri, properties.uri);\n }\n });\n};\n/**\n * Creates a main playlist wrapper to insert a sole media playlist into.\n *\n * @param {Object} media\n * Media playlist\n * @param {string} uri\n * The media URI\n *\n * @return {Object}\n * main playlist\n */\n\nconst mainForMedia = (media, uri) => {\n const id = createPlaylistID(0, uri);\n const main = {\n mediaGroups: {\n 'AUDIO': {},\n 'VIDEO': {},\n 'CLOSED-CAPTIONS': {},\n 'SUBTITLES': {}\n },\n uri: window$1.location.href,\n resolvedUri: window$1.location.href,\n playlists: [{\n uri,\n id,\n resolvedUri: uri,\n // m3u8-parser does not attach an attributes property to media playlists so make\n // sure that the property is attached to avoid undefined reference errors\n attributes: {}\n }]\n }; // set up ID reference\n\n main.playlists[id] = main.playlists[0]; // URI reference added for backwards compatibility\n\n main.playlists[uri] = main.playlists[0];\n return main;\n};\n/**\n * Does an in-place update of the main manifest to add updated playlist URI references\n * as well as other properties needed by VHS that aren't included by the parser.\n *\n * @param {Object} main\n * main manifest object\n * @param {string} uri\n * The source URI\n * @param {function} createGroupID\n * A function to determine how to create the groupID for mediaGroups\n */\n\nconst addPropertiesToMain = (main, uri, createGroupID = groupID) => {\n main.uri = uri;\n for (let i = 0; i < main.playlists.length; i++) {\n if (!main.playlists[i].uri) {\n // Set up phony URIs for the playlists since playlists are referenced by their URIs\n // throughout VHS, but some formats (e.g., DASH) don't have external URIs\n // TODO: consider adding dummy URIs in mpd-parser\n const phonyUri = `placeholder-uri-${i}`;\n main.playlists[i].uri = phonyUri;\n }\n }\n const audioOnlyMain = isAudioOnly(main);\n forEachMediaGroup(main, (properties, mediaType, groupKey, labelKey) => {\n // add a playlist array under properties\n if (!properties.playlists || !properties.playlists.length) {\n // If the manifest is audio only and this media group does not have a uri, check\n // if the media group is located in the main list of playlists. If it is, don't add\n // placeholder properties as it shouldn't be considered an alternate audio track.\n if (audioOnlyMain && mediaType === 'AUDIO' && !properties.uri) {\n for (let i = 0; i < main.playlists.length; i++) {\n const p = main.playlists[i];\n if (p.attributes && p.attributes.AUDIO && p.attributes.AUDIO === groupKey) {\n return;\n }\n }\n }\n properties.playlists = [_extends({}, properties)];\n }\n properties.playlists.forEach(function (p, i) {\n const groupId = createGroupID(mediaType, groupKey, labelKey, p);\n const id = createPlaylistID(i, groupId);\n if (p.uri) {\n p.resolvedUri = p.resolvedUri || resolveUrl(main.uri, p.uri);\n } else {\n // DEPRECATED, this has been added to prevent a breaking change.\n // previously we only ever had a single media group playlist, so\n // we mark the first playlist uri without prepending the index as we used to\n // ideally we would do all of the playlists the same way.\n p.uri = i === 0 ? 
groupId : id; // don't resolve a placeholder uri to an absolute url, just use\n // the placeholder again\n\n p.resolvedUri = p.uri;\n }\n p.id = p.id || id; // add an empty attributes object, all playlists are\n // expected to have this.\n\n p.attributes = p.attributes || {}; // setup ID and URI references (URI for backwards compatibility)\n\n main.playlists[p.id] = p;\n main.playlists[p.uri] = p;\n });\n });\n setupMediaPlaylists(main);\n resolveMediaGroupUris(main);\n};\nclass DateRangesStorage {\n constructor() {\n this.offset_ = null;\n this.pendingDateRanges_ = new Map();\n this.processedDateRanges_ = new Map();\n }\n setOffset(segments = []) {\n // already set\n if (this.offset_ !== null) {\n return;\n } // no segment to process\n\n if (!segments.length) {\n return;\n }\n const [firstSegment] = segments; // no program date time\n\n if (firstSegment.programDateTime === undefined) {\n return;\n } // Set offset as ProgramDateTime for the very first segment of the very first playlist load:\n\n this.offset_ = firstSegment.programDateTime / 1000;\n }\n setPendingDateRanges(dateRanges = []) {\n if (!dateRanges.length) {\n return;\n }\n const [dateRange] = dateRanges;\n const startTime = dateRange.startDate.getTime();\n this.trimProcessedDateRanges_(startTime);\n this.pendingDateRanges_ = dateRanges.reduce((map, pendingDateRange) => {\n map.set(pendingDateRange.id, pendingDateRange);\n return map;\n }, new Map());\n }\n processDateRange(dateRange) {\n this.pendingDateRanges_.delete(dateRange.id);\n this.processedDateRanges_.set(dateRange.id, dateRange);\n }\n getDateRangesToProcess() {\n if (this.offset_ === null) {\n return [];\n }\n const dateRangeClasses = {};\n const dateRangesToProcess = [];\n this.pendingDateRanges_.forEach((dateRange, id) => {\n if (this.processedDateRanges_.has(id)) {\n return;\n }\n dateRange.startTime = dateRange.startDate.getTime() / 1000 - this.offset_;\n dateRange.processDateRange = () => this.processDateRange(dateRange);\n dateRangesToProcess.push(dateRange);\n if (!dateRange.class) {\n return;\n }\n if (dateRangeClasses[dateRange.class]) {\n const length = dateRangeClasses[dateRange.class].push(dateRange);\n dateRange.classListIndex = length - 1;\n } else {\n dateRangeClasses[dateRange.class] = [dateRange];\n dateRange.classListIndex = 0;\n }\n });\n for (const dateRange of dateRangesToProcess) {\n const classList = dateRangeClasses[dateRange.class] || [];\n if (dateRange.endDate) {\n dateRange.endTime = dateRange.endDate.getTime() / 1000 - this.offset_;\n } else if (dateRange.endOnNext && classList[dateRange.classListIndex + 1]) {\n dateRange.endTime = classList[dateRange.classListIndex + 1].startTime;\n } else if (dateRange.duration) {\n dateRange.endTime = dateRange.startTime + dateRange.duration;\n } else if (dateRange.plannedDuration) {\n dateRange.endTime = dateRange.startTime + dateRange.plannedDuration;\n } else {\n dateRange.endTime = dateRange.startTime;\n }\n }\n return dateRangesToProcess;\n }\n trimProcessedDateRanges_(startTime) {\n const copy = new Map(this.processedDateRanges_);\n copy.forEach((dateRange, id) => {\n if (dateRange.startDate.getTime() < startTime) {\n this.processedDateRanges_.delete(id);\n }\n });\n }\n}\nconst QUOTA_EXCEEDED_ERR = 22;\nconst getStreamingNetworkErrorMetadata = ({\n requestType,\n request,\n error,\n parseFailure\n}) => {\n const isBadStatus = request.status < 200 || request.status > 299;\n const isFailure = request.status >= 400 && request.status <= 499;\n const errorMetadata = {\n uri: request.uri,\n 
requestType\n  };\n  const isBadStatusOrParseFailure = isBadStatus && !isFailure || parseFailure;\n  if (error && isFailure) {\n    // copy original error and add to the metadata.\n    errorMetadata.error = _extends({}, error);\n    errorMetadata.errorType = videojs.Error.NetworkRequestFailed;\n  } else if (request.aborted) {\n    errorMetadata.errorType = videojs.Error.NetworkRequestAborted;\n  } else if (request.timedout) {\n    errorMetadata.errorType = videojs.Error.NetworkRequestTimeout;\n  } else if (isBadStatusOrParseFailure) {\n    const errorType = parseFailure ? videojs.Error.NetworkBodyParserFailed : videojs.Error.NetworkBadStatus;\n    errorMetadata.errorType = errorType;\n    errorMetadata.status = request.status;\n    errorMetadata.headers = request.headers;\n  }\n  return errorMetadata;\n};\nconst {\n  EventTarget: EventTarget$1\n} = videojs;\nconst addLLHLSQueryDirectives = (uri, media) => {\n  if (media.endList || !media.serverControl) {\n    return uri;\n  }\n  const parameters = {};\n  if (media.serverControl.canBlockReload) {\n    const {\n      preloadSegment\n    } = media; // next msn is a zero based value, length is not.\n\n    let nextMSN = media.mediaSequence + media.segments.length; // If preload segment has parts then it is likely\n    // that we are going to request a part of that preload segment.\n    // the logic below is used to determine that.\n\n    if (preloadSegment) {\n      const parts = preloadSegment.parts || []; // _HLS_part is a zero based index\n\n      const nextPart = getKnownPartCount(media) - 1; // if nextPart is > -1 and not equal to just the\n      // length of parts, then we know we had part preload hints\n      // and we need to add the _HLS_part= query\n\n      if (nextPart > -1 && nextPart !== parts.length - 1) {\n        // add existing parts to our preload hints\n        // eslint-disable-next-line\n        parameters._HLS_part = nextPart;\n      } // this if statement makes sure that we request the msn\n      // of the preload segment if:\n      // 1. the preload segment had parts (and was not yet a full segment)\n      //    but was added to our segments array\n      // 2. the preload segment had preload hints for parts that are not in\n      //    the manifest yet.\n      // in all other cases we want the segment after the preload segment\n      // which will be given by using media.segments.length because it is 1 based\n      // rather than 0 based.\n\n      if (nextPart > -1 || parts.length) {\n        nextMSN--;\n      }\n    } // add _HLS_msn= in front of any _HLS_part query\n    // eslint-disable-next-line\n\n    parameters._HLS_msn = nextMSN;\n  }\n  if (media.serverControl && media.serverControl.canSkipUntil) {\n    // add _HLS_skip= in front of all other queries.\n    // eslint-disable-next-line\n    parameters._HLS_skip = media.serverControl.canSkipDateranges ? 
'v2' : 'YES';\n  }\n  if (Object.keys(parameters).length) {\n    const parsedUri = new window$1.URL(uri);\n    ['_HLS_skip', '_HLS_msn', '_HLS_part'].forEach(function (name) {\n      if (!parameters.hasOwnProperty(name)) {\n        return;\n      }\n      parsedUri.searchParams.set(name, parameters[name]);\n    });\n    uri = parsedUri.toString();\n  }\n  return uri;\n};\n/**\n * Returns a new segment object with properties and\n * the parts array merged.\n *\n * @param {Object} a the old segment\n * @param {Object} b the new segment\n *\n * @return {Object} the merged segment\n */\n\nconst updateSegment = (a, b) => {\n  if (!a) {\n    return b;\n  }\n  const result = merge(a, b); // if only the old segment has preload hints\n  // and the new one does not, remove preload hints.\n\n  if (a.preloadHints && !b.preloadHints) {\n    delete result.preloadHints;\n  } // if only the old segment has parts\n  // then the parts are no longer valid\n\n  if (a.parts && !b.parts) {\n    delete result.parts; // if both segments have parts\n    // copy part properties from the old segment\n    // to the new one.\n  } else if (a.parts && b.parts) {\n    for (let i = 0; i < b.parts.length; i++) {\n      if (a.parts && a.parts[i]) {\n        result.parts[i] = merge(a.parts[i], b.parts[i]);\n      }\n    }\n  } // set skipped to false for segments that have\n  // had information merged from the old segment.\n\n  if (!a.skipped && b.skipped) {\n    result.skipped = false;\n  } // set preload to false for segments that have\n  // had information added in the new segment.\n\n  if (a.preload && !b.preload) {\n    result.preload = false;\n  }\n  return result;\n};\n/**\n * Returns a new array of segments that is the result of merging\n * properties from an older list of segments onto an updated\n * list. No properties on the updated playlist will be overwritten.\n *\n * @param {Array} original the outdated list of segments\n * @param {Array} update the updated list of segments\n * @param {number=} offset the index of the first update\n * segment in the original segment list. For non-live playlists,\n * this should always be zero and does not need to be\n * specified. 
For live playlists, it should be the difference\n * between the media sequence numbers in the original and updated\n * playlists.\n * @return {Array} a list of merged segment objects\n */\n\nconst updateSegments = (original, update, offset) => {\n const oldSegments = original.slice();\n const newSegments = update.slice();\n offset = offset || 0;\n const result = [];\n let currentMap;\n for (let newIndex = 0; newIndex < newSegments.length; newIndex++) {\n const oldSegment = oldSegments[newIndex + offset];\n const newSegment = newSegments[newIndex];\n if (oldSegment) {\n currentMap = oldSegment.map || currentMap;\n result.push(updateSegment(oldSegment, newSegment));\n } else {\n // carry over map to new segment if it is missing\n if (currentMap && !newSegment.map) {\n newSegment.map = currentMap;\n }\n result.push(newSegment);\n }\n }\n return result;\n};\nconst resolveSegmentUris = (segment, baseUri) => {\n // preloadSegment will not have a uri at all\n // as the segment isn't actually in the manifest yet, only parts\n if (!segment.resolvedUri && segment.uri) {\n segment.resolvedUri = resolveUrl(baseUri, segment.uri);\n }\n if (segment.key && !segment.key.resolvedUri) {\n segment.key.resolvedUri = resolveUrl(baseUri, segment.key.uri);\n }\n if (segment.map && !segment.map.resolvedUri) {\n segment.map.resolvedUri = resolveUrl(baseUri, segment.map.uri);\n }\n if (segment.map && segment.map.key && !segment.map.key.resolvedUri) {\n segment.map.key.resolvedUri = resolveUrl(baseUri, segment.map.key.uri);\n }\n if (segment.parts && segment.parts.length) {\n segment.parts.forEach(p => {\n if (p.resolvedUri) {\n return;\n }\n p.resolvedUri = resolveUrl(baseUri, p.uri);\n });\n }\n if (segment.preloadHints && segment.preloadHints.length) {\n segment.preloadHints.forEach(p => {\n if (p.resolvedUri) {\n return;\n }\n p.resolvedUri = resolveUrl(baseUri, p.uri);\n });\n }\n};\nconst getAllSegments = function (media) {\n const segments = media.segments || [];\n const preloadSegment = media.preloadSegment; // a preloadSegment with only preloadHints is not currently\n // a usable segment, only include a preloadSegment that has\n // parts.\n\n if (preloadSegment && preloadSegment.parts && preloadSegment.parts.length) {\n // if preloadHints has a MAP that means that the\n // init segment is going to change. We cannot use any of the parts\n // from this preload segment.\n if (preloadSegment.preloadHints) {\n for (let i = 0; i < preloadSegment.preloadHints.length; i++) {\n if (preloadSegment.preloadHints[i].type === 'MAP') {\n return segments;\n }\n }\n } // set the duration for our preload segment to target duration.\n\n preloadSegment.duration = media.targetDuration;\n preloadSegment.preload = true;\n segments.push(preloadSegment);\n }\n return segments;\n}; // consider the playlist unchanged if the playlist object is the same or\n// the number of segments is equal, the media sequence number is unchanged,\n// and this playlist hasn't become the end of the playlist\n\nconst isPlaylistUnchanged = (a, b) => a === b || a.segments && b.segments && a.segments.length === b.segments.length && a.endList === b.endList && a.mediaSequence === b.mediaSequence && a.preloadSegment === b.preloadSegment;\n/**\n * Returns a new main playlist that is the result of merging an\n * updated media playlist into the original version. 
If the\n * updated media playlist does not match any of the playlist\n * entries in the original main playlist, null is returned.\n *\n * @param {Object} main a parsed main M3U8 object\n * @param {Object} media a parsed media M3U8 object\n * @return {Object} a new object that represents the original\n * main playlist with the updated media playlist merged in, or\n * null if the merge produced no change.\n */\n\nconst updateMain$1 = (main, newMedia, unchangedCheck = isPlaylistUnchanged) => {\n const result = merge(main, {});\n const oldMedia = result.playlists[newMedia.id];\n if (!oldMedia) {\n return null;\n }\n if (unchangedCheck(oldMedia, newMedia)) {\n return null;\n }\n newMedia.segments = getAllSegments(newMedia);\n const mergedPlaylist = merge(oldMedia, newMedia); // always use the new media's preload segment\n\n if (mergedPlaylist.preloadSegment && !newMedia.preloadSegment) {\n delete mergedPlaylist.preloadSegment;\n } // if the update could overlap existing segment information, merge the two segment lists\n\n if (oldMedia.segments) {\n if (newMedia.skip) {\n newMedia.segments = newMedia.segments || []; // add back in objects for skipped segments, so that we merge\n // old properties into the new segments\n\n for (let i = 0; i < newMedia.skip.skippedSegments; i++) {\n newMedia.segments.unshift({\n skipped: true\n });\n }\n }\n mergedPlaylist.segments = updateSegments(oldMedia.segments, newMedia.segments, newMedia.mediaSequence - oldMedia.mediaSequence);\n } // resolve any segment URIs to prevent us from having to do it later\n\n mergedPlaylist.segments.forEach(segment => {\n resolveSegmentUris(segment, mergedPlaylist.resolvedUri);\n }); // TODO Right now in the playlists array there are two references to each playlist, one\n // that is referenced by index, and one by URI. 
The index reference may no longer be\n // necessary.\n\n for (let i = 0; i < result.playlists.length; i++) {\n if (result.playlists[i].id === newMedia.id) {\n result.playlists[i] = mergedPlaylist;\n }\n }\n result.playlists[newMedia.id] = mergedPlaylist; // URI reference added for backwards compatibility\n\n result.playlists[newMedia.uri] = mergedPlaylist; // update media group playlist references.\n\n forEachMediaGroup(main, (properties, mediaType, groupKey, labelKey) => {\n if (!properties.playlists) {\n return;\n }\n for (let i = 0; i < properties.playlists.length; i++) {\n if (newMedia.id === properties.playlists[i].id) {\n properties.playlists[i] = mergedPlaylist;\n }\n }\n });\n return result;\n};\n/**\n * Calculates the time to wait before refreshing a live playlist\n *\n * @param {Object} media\n * The current media\n * @param {boolean} update\n * True if there were any updates from the last refresh, false otherwise\n * @return {number}\n * The time in ms to wait before refreshing the live playlist\n */\n\nconst refreshDelay = (media, update) => {\n const segments = media.segments || [];\n const lastSegment = segments[segments.length - 1];\n const lastPart = lastSegment && lastSegment.parts && lastSegment.parts[lastSegment.parts.length - 1];\n const lastDuration = lastPart && lastPart.duration || lastSegment && lastSegment.duration;\n if (update && lastDuration) {\n return lastDuration * 1000;\n } // if the playlist is unchanged since the last reload or last segment duration\n // cannot be determined, try again after half the target duration\n\n return (media.partTargetDuration || media.targetDuration || 10) * 500;\n};\nconst playlistMetadataPayload = (playlists, type, isLive) => {\n if (!playlists) {\n return;\n }\n const renditions = [];\n playlists.forEach(playlist => {\n // we need attributes to populate rendition data.\n if (!playlist.attributes) {\n return;\n }\n const {\n BANDWIDTH,\n RESOLUTION,\n CODECS\n } = playlist.attributes;\n renditions.push({\n id: playlist.id,\n bandwidth: BANDWIDTH,\n resolution: RESOLUTION,\n codecs: CODECS\n });\n });\n return {\n type,\n isLive,\n renditions\n };\n};\n/**\n * Load a playlist from a remote location\n *\n * @class PlaylistLoader\n * @extends Stream\n * @param {string|Object} src url or object of manifest\n * @param {boolean} withCredentials the withCredentials xhr option\n * @class\n */\n\nclass PlaylistLoader extends EventTarget$1 {\n constructor(src, vhs, options = {}) {\n super();\n if (!src) {\n throw new Error('A non-empty playlist URL or object is required');\n }\n this.logger_ = logger('PlaylistLoader');\n const {\n withCredentials = false\n } = options;\n this.src = src;\n this.vhs_ = vhs;\n this.withCredentials = withCredentials;\n this.addDateRangesToTextTrack_ = options.addDateRangesToTextTrack;\n const vhsOptions = vhs.options_;\n this.customTagParsers = vhsOptions && vhsOptions.customTagParsers || [];\n this.customTagMappers = vhsOptions && vhsOptions.customTagMappers || [];\n this.llhls = vhsOptions && vhsOptions.llhls;\n this.dateRangesStorage_ = new DateRangesStorage(); // initialize the loader state\n\n this.state = 'HAVE_NOTHING'; // live playlist staleness timeout\n\n this.handleMediaupdatetimeout_ = this.handleMediaupdatetimeout_.bind(this);\n this.on('mediaupdatetimeout', this.handleMediaupdatetimeout_);\n this.on('loadedplaylist', this.handleLoadedPlaylist_.bind(this));\n }\n handleLoadedPlaylist_() {\n const mediaPlaylist = this.media();\n if (!mediaPlaylist) {\n return;\n }\n 
this.dateRangesStorage_.setOffset(mediaPlaylist.segments);\n this.dateRangesStorage_.setPendingDateRanges(mediaPlaylist.dateRanges);\n const availableDateRanges = this.dateRangesStorage_.getDateRangesToProcess();\n if (!availableDateRanges.length || !this.addDateRangesToTextTrack_) {\n return;\n }\n this.addDateRangesToTextTrack_(availableDateRanges);\n }\n handleMediaupdatetimeout_() {\n if (this.state !== 'HAVE_METADATA') {\n // only refresh the media playlist if no other activity is going on\n return;\n }\n const media = this.media();\n let uri = resolveUrl(this.main.uri, media.uri);\n if (this.llhls) {\n uri = addLLHLSQueryDirectives(uri, media);\n }\n this.state = 'HAVE_CURRENT_METADATA';\n this.request = this.vhs_.xhr({\n uri,\n withCredentials: this.withCredentials,\n requestType: 'hls-playlist'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n }\n if (error) {\n return this.playlistRequestError(this.request, this.media(), 'HAVE_METADATA');\n }\n this.haveMetadata({\n playlistString: this.request.responseText,\n url: this.media().uri,\n id: this.media().id\n });\n });\n }\n playlistRequestError(xhr, playlist, startingState) {\n const {\n uri,\n id\n } = playlist; // any in-flight request is now finished\n\n this.request = null;\n if (startingState) {\n this.state = startingState;\n }\n this.error = {\n playlist: this.main.playlists[id],\n status: xhr.status,\n message: `HLS playlist request error at URL: ${uri}.`,\n responseText: xhr.responseText,\n code: xhr.status >= 500 ? 4 : 2,\n metadata: getStreamingNetworkErrorMetadata({\n requestType: xhr.requestType,\n request: xhr,\n error: xhr.error\n })\n };\n this.trigger('error');\n }\n parseManifest_({\n url,\n manifestString\n }) {\n try {\n return parseManifest({\n onwarn: ({\n message\n }) => this.logger_(`m3u8-parser warn for ${url}: ${message}`),\n oninfo: ({\n message\n }) => this.logger_(`m3u8-parser info for ${url}: ${message}`),\n manifestString,\n customTagParsers: this.customTagParsers,\n customTagMappers: this.customTagMappers,\n llhls: this.llhls\n });\n } catch (error) {\n this.error = error;\n this.error.metadata = {\n errorType: videojs.Error.StreamingHlsPlaylistParserError,\n error\n };\n }\n }\n /**\n * Update the playlist loader's state in response to a new or updated playlist.\n *\n * @param {string} [playlistString]\n * Playlist string (if playlistObject is not provided)\n * @param {Object} [playlistObject]\n * Playlist object (if playlistString is not provided)\n * @param {string} url\n * URL of playlist\n * @param {string} id\n * ID to use for playlist\n */\n\n haveMetadata({\n playlistString,\n playlistObject,\n url,\n id\n }) {\n // any in-flight request is now finished\n this.request = null;\n this.state = 'HAVE_METADATA';\n const metadata = {\n playlistInfo: {\n type: 'media',\n uri: url\n }\n };\n this.trigger({\n type: 'playlistparsestart',\n metadata\n });\n const playlist = playlistObject || this.parseManifest_({\n url,\n manifestString: playlistString\n });\n playlist.lastRequest = Date.now();\n setupMediaPlaylist({\n playlist,\n uri: url,\n id\n }); // merge this playlist into the main manifest\n\n const update = updateMain$1(this.main, playlist);\n this.targetDuration = playlist.partTargetDuration || playlist.targetDuration;\n this.pendingMedia_ = null;\n if (update) {\n this.main = update;\n this.media_ = this.main.playlists[id];\n } else {\n this.trigger('playlistunchanged');\n }\n this.updateMediaUpdateTimeout_(refreshDelay(this.media(), !!update));\n metadata.parsedPlaylist = 
playlistMetadataPayload(this.main.playlists, metadata.playlistInfo.type, !this.media_.endList);\n this.trigger({\n type: 'playlistparsecomplete',\n metadata\n });\n this.trigger('loadedplaylist');\n }\n /**\n * Abort any outstanding work and clean up.\n */\n\n dispose() {\n this.trigger('dispose');\n this.stopRequest();\n window$1.clearTimeout(this.mediaUpdateTimeout);\n window$1.clearTimeout(this.finalRenditionTimeout);\n this.dateRangesStorage_ = new DateRangesStorage();\n this.off();\n }\n stopRequest() {\n if (this.request) {\n const oldRequest = this.request;\n this.request = null;\n oldRequest.onreadystatechange = null;\n oldRequest.abort();\n }\n }\n /**\n * When called without any arguments, returns the currently\n * active media playlist. When called with a single argument,\n * triggers the playlist loader to asynchronously switch to the\n * specified media playlist. Calling this method while the\n * loader is in the HAVE_NOTHING causes an error to be emitted\n * but otherwise has no effect.\n *\n * @param {Object=} playlist the parsed media playlist\n * object to switch to\n * @param {boolean=} shouldDelay whether we should delay the request by half target duration\n *\n * @return {Playlist} the current loaded media\n */\n\n media(playlist, shouldDelay) {\n // getter\n if (!playlist) {\n return this.media_;\n } // setter\n\n if (this.state === 'HAVE_NOTHING') {\n throw new Error('Cannot switch media playlist from ' + this.state);\n } // find the playlist object if the target playlist has been\n // specified by URI\n\n if (typeof playlist === 'string') {\n if (!this.main.playlists[playlist]) {\n throw new Error('Unknown playlist URI: ' + playlist);\n }\n playlist = this.main.playlists[playlist];\n }\n window$1.clearTimeout(this.finalRenditionTimeout);\n if (shouldDelay) {\n const delay = (playlist.partTargetDuration || playlist.targetDuration) / 2 * 1000 || 5 * 1000;\n this.finalRenditionTimeout = window$1.setTimeout(this.media.bind(this, playlist, false), delay);\n return;\n }\n const startingState = this.state;\n const mediaChange = !this.media_ || playlist.id !== this.media_.id;\n const mainPlaylistRef = this.main.playlists[playlist.id]; // switch to fully loaded playlists immediately\n\n if (mainPlaylistRef && mainPlaylistRef.endList ||\n // handle the case of a playlist object (e.g., if using vhs-json with a resolved\n // media playlist or, for the case of demuxed audio, a resolved audio media group)\n playlist.endList && playlist.segments.length) {\n // abort outstanding playlist requests\n if (this.request) {\n this.request.onreadystatechange = null;\n this.request.abort();\n this.request = null;\n }\n this.state = 'HAVE_METADATA';\n this.media_ = playlist; // trigger media change if the active media has been updated\n\n if (mediaChange) {\n this.trigger('mediachanging');\n if (startingState === 'HAVE_MAIN_MANIFEST') {\n // The initial playlist was a main manifest, and the first media selected was\n // also provided (in the form of a resolved playlist object) as part of the\n // source object (rather than just a URL). 
Therefore, since the media playlist\n // doesn't need to be requested, loadedmetadata won't trigger as part of the\n // normal flow, and needs an explicit trigger here.\n this.trigger('loadedmetadata');\n } else {\n this.trigger('mediachange');\n }\n }\n return;\n } // We update/set the timeout here so that live playlists\n // that are not a media change will \"start\" the loader as expected.\n // We expect that this function will start the media update timeout\n // cycle again. This also prevents a playlist switch failure from\n // causing us to stall during live.\n\n this.updateMediaUpdateTimeout_(refreshDelay(playlist, true)); // switching to the active playlist is a no-op\n\n if (!mediaChange) {\n return;\n }\n this.state = 'SWITCHING_MEDIA'; // there is already an outstanding playlist request\n\n if (this.request) {\n if (playlist.resolvedUri === this.request.url) {\n // requesting to switch to the same playlist multiple times\n // has no effect after the first\n return;\n }\n this.request.onreadystatechange = null;\n this.request.abort();\n this.request = null;\n } // request the new playlist\n\n if (this.media_) {\n this.trigger('mediachanging');\n }\n this.pendingMedia_ = playlist;\n const metadata = {\n playlistInfo: {\n type: 'media',\n uri: playlist.uri\n }\n };\n this.trigger({\n type: 'playlistrequeststart',\n metadata\n });\n this.request = this.vhs_.xhr({\n uri: playlist.resolvedUri,\n withCredentials: this.withCredentials,\n requestType: 'hls-playlist'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n }\n playlist.lastRequest = Date.now();\n playlist.resolvedUri = resolveManifestRedirect(playlist.resolvedUri, req);\n if (error) {\n return this.playlistRequestError(this.request, playlist, startingState);\n }\n this.trigger({\n type: 'playlistrequestcomplete',\n metadata\n });\n this.haveMetadata({\n playlistString: req.responseText,\n url: playlist.uri,\n id: playlist.id\n }); // fire loadedmetadata the first time a media playlist is loaded\n\n if (startingState === 'HAVE_MAIN_MANIFEST') {\n this.trigger('loadedmetadata');\n } else {\n this.trigger('mediachange');\n }\n });\n }\n /**\n * pause loading of the playlist\n */\n\n pause() {\n if (this.mediaUpdateTimeout) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n }\n this.stopRequest();\n if (this.state === 'HAVE_NOTHING') {\n // If we pause the loader before any data has been retrieved, its as if we never\n // started, so reset to an unstarted state.\n this.started = false;\n } // Need to restore state now that no activity is happening\n\n if (this.state === 'SWITCHING_MEDIA') {\n // if the loader was in the process of switching media, it should either return to\n // HAVE_MAIN_MANIFEST or HAVE_METADATA depending on if the loader has loaded a media\n // playlist yet. This is determined by the existence of loader.media_\n if (this.media_) {\n this.state = 'HAVE_METADATA';\n } else {\n this.state = 'HAVE_MAIN_MANIFEST';\n }\n } else if (this.state === 'HAVE_CURRENT_METADATA') {\n this.state = 'HAVE_METADATA';\n }\n }\n /**\n * start loading of the playlist\n */\n\n load(shouldDelay) {\n if (this.mediaUpdateTimeout) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n }\n const media = this.media();\n if (shouldDelay) {\n const delay = media ? 
(media.partTargetDuration || media.targetDuration) / 2 * 1000 : 5 * 1000;\n this.mediaUpdateTimeout = window$1.setTimeout(() => {\n this.mediaUpdateTimeout = null;\n this.load();\n }, delay);\n return;\n }\n if (!this.started) {\n this.start();\n return;\n }\n if (media && !media.endList) {\n this.trigger('mediaupdatetimeout');\n } else {\n this.trigger('loadedplaylist');\n }\n }\n updateMediaUpdateTimeout_(delay) {\n if (this.mediaUpdateTimeout) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n } // we only have use mediaupdatetimeout for live playlists.\n\n if (!this.media() || this.media().endList) {\n return;\n }\n this.mediaUpdateTimeout = window$1.setTimeout(() => {\n this.mediaUpdateTimeout = null;\n this.trigger('mediaupdatetimeout');\n this.updateMediaUpdateTimeout_(delay);\n }, delay);\n }\n /**\n * start loading of the playlist\n */\n\n start() {\n this.started = true;\n if (typeof this.src === 'object') {\n // in the case of an entirely constructed manifest object (meaning there's no actual\n // manifest on a server), default the uri to the page's href\n if (!this.src.uri) {\n this.src.uri = window$1.location.href;\n } // resolvedUri is added on internally after the initial request. Since there's no\n // request for pre-resolved manifests, add on resolvedUri here.\n\n this.src.resolvedUri = this.src.uri; // Since a manifest object was passed in as the source (instead of a URL), the first\n // request can be skipped (since the top level of the manifest, at a minimum, is\n // already available as a parsed manifest object). However, if the manifest object\n // represents a main playlist, some media playlists may need to be resolved before\n // the starting segment list is available. Therefore, go directly to setup of the\n // initial playlist, and let the normal flow continue from there.\n //\n // Note that the call to setup is asynchronous, as other sections of VHS may assume\n // that the first request is asynchronous.\n\n setTimeout(() => {\n this.setupInitialPlaylist(this.src);\n }, 0);\n return;\n }\n const metadata = {\n playlistInfo: {\n type: 'multivariant',\n uri: this.src\n }\n };\n this.trigger({\n type: 'playlistrequeststart',\n metadata\n }); // request the specified URL\n\n this.request = this.vhs_.xhr({\n uri: this.src,\n withCredentials: this.withCredentials,\n requestType: 'hls-playlist'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n } // clear the loader's request reference\n\n this.request = null;\n if (error) {\n this.error = {\n status: req.status,\n message: `HLS playlist request error at URL: ${this.src}.`,\n responseText: req.responseText,\n // MEDIA_ERR_NETWORK\n code: 2,\n metadata: getStreamingNetworkErrorMetadata({\n requestType: req.requestType,\n request: req,\n error\n })\n };\n if (this.state === 'HAVE_NOTHING') {\n this.started = false;\n }\n return this.trigger('error');\n }\n this.trigger({\n type: 'playlistrequestcomplete',\n metadata\n });\n this.src = resolveManifestRedirect(this.src, req);\n this.trigger({\n type: 'playlistparsestart',\n metadata\n });\n const manifest = this.parseManifest_({\n manifestString: req.responseText,\n url: this.src\n }); // we haven't loaded any variant playlists here so we default to false for isLive.\n\n metadata.parsedPlaylist = playlistMetadataPayload(manifest.playlists, metadata.playlistInfo.type, false);\n this.trigger({\n type: 'playlistparsecomplete',\n metadata\n });\n this.setupInitialPlaylist(manifest);\n });\n }\n srcUri() {\n return typeof 
this.src === 'string' ? this.src : this.src.uri;\n }\n /**\n * Given a manifest object that's either a main or media playlist, trigger the proper\n * events and set the state of the playlist loader.\n *\n * If the manifest object represents a main playlist, `loadedplaylist` will be\n * triggered to allow listeners to select a playlist. If none is selected, the loader\n * will default to the first one in the playlists array.\n *\n * If the manifest object represents a media playlist, `loadedplaylist` will be\n * triggered followed by `loadedmetadata`, as the only available playlist is loaded.\n *\n * In the case of a media playlist, a main playlist object wrapper with one playlist\n * will be created so that all logic can handle playlists in the same fashion (as an\n * assumed manifest object schema).\n *\n * @param {Object} manifest\n * The parsed manifest object\n */\n\n setupInitialPlaylist(manifest) {\n this.state = 'HAVE_MAIN_MANIFEST';\n if (manifest.playlists) {\n this.main = manifest;\n addPropertiesToMain(this.main, this.srcUri()); // If the initial main playlist has playlists wtih segments already resolved,\n // then resolve URIs in advance, as they are usually done after a playlist request,\n // which may not happen if the playlist is resolved.\n\n manifest.playlists.forEach(playlist => {\n playlist.segments = getAllSegments(playlist);\n playlist.segments.forEach(segment => {\n resolveSegmentUris(segment, playlist.resolvedUri);\n });\n });\n this.trigger('loadedplaylist');\n if (!this.request) {\n // no media playlist was specifically selected so start\n // from the first listed one\n this.media(this.main.playlists[0]);\n }\n return;\n } // In order to support media playlists passed in as vhs-json, the case where the uri\n // is not provided as part of the manifest should be considered, and an appropriate\n // default used.\n\n const uri = this.srcUri() || window$1.location.href;\n this.main = mainForMedia(manifest, uri);\n this.haveMetadata({\n playlistObject: manifest,\n url: uri,\n id: this.main.playlists[0].id\n });\n this.trigger('loadedmetadata');\n }\n /**\n * Updates or deletes a preexisting pathway clone.\n * Ensures that all playlists related to the old pathway clone are\n * either updated or deleted.\n *\n * @param {Object} clone On update, the pathway clone object for the newly updated pathway clone.\n * On delete, the old pathway clone object to be deleted.\n * @param {boolean} isUpdate True if the pathway is to be updated,\n * false if it is meant to be deleted.\n */\n\n updateOrDeleteClone(clone, isUpdate) {\n const main = this.main;\n const pathway = clone.ID;\n let i = main.playlists.length; // Iterate backwards through the playlist so we can remove playlists if necessary.\n\n while (i--) {\n const p = main.playlists[i];\n if (p.attributes['PATHWAY-ID'] === pathway) {\n const oldPlaylistUri = p.resolvedUri;\n const oldPlaylistId = p.id; // update the indexed playlist and add new playlists by ID and URI\n\n if (isUpdate) {\n const newPlaylistUri = this.createCloneURI_(p.resolvedUri, clone);\n const newPlaylistId = createPlaylistID(pathway, newPlaylistUri);\n const attributes = this.createCloneAttributes_(pathway, p.attributes);\n const updatedPlaylist = this.createClonePlaylist_(p, newPlaylistId, clone, attributes);\n main.playlists[i] = updatedPlaylist;\n main.playlists[newPlaylistId] = updatedPlaylist;\n main.playlists[newPlaylistUri] = updatedPlaylist;\n } else {\n // Remove the indexed playlist.\n main.playlists.splice(i, 1);\n } // Remove playlists by the old 
ID and URI.\n\n delete main.playlists[oldPlaylistId];\n delete main.playlists[oldPlaylistUri];\n }\n }\n this.updateOrDeleteCloneMedia(clone, isUpdate);\n }\n /**\n * Updates or deletes media data based on the pathway clone object.\n * Due to the complexity of the media groups and playlists, in all cases\n * we remove all of the old media groups and playlists.\n * On updates, we then create new media groups and playlists based on the\n * new pathway clone object.\n *\n * @param {Object} clone The pathway clone object for the newly updated pathway clone.\n * @param {boolean} isUpdate True if the pathway is to be updated,\n * false if it is meant to be deleted.\n */\n\n updateOrDeleteCloneMedia(clone, isUpdate) {\n const main = this.main;\n const id = clone.ID;\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(mediaType => {\n if (!main.mediaGroups[mediaType] || !main.mediaGroups[mediaType][id]) {\n return;\n }\n for (const groupKey in main.mediaGroups[mediaType]) {\n // Remove all media playlists for the media group for this pathway clone.\n if (groupKey === id) {\n for (const labelKey in main.mediaGroups[mediaType][groupKey]) {\n const oldMedia = main.mediaGroups[mediaType][groupKey][labelKey];\n oldMedia.playlists.forEach((p, i) => {\n const oldMediaPlaylist = main.playlists[p.id];\n const oldPlaylistId = oldMediaPlaylist.id;\n const oldPlaylistUri = oldMediaPlaylist.resolvedUri;\n delete main.playlists[oldPlaylistId];\n delete main.playlists[oldPlaylistUri];\n });\n } // Delete the old media group.\n\n delete main.mediaGroups[mediaType][groupKey];\n }\n }\n }); // Create the new media groups and playlists if there is an update.\n\n if (isUpdate) {\n this.createClonedMediaGroups_(clone);\n }\n }\n /**\n * Given a pathway clone object, clones all necessary playlists.\n *\n * @param {Object} clone The pathway clone object.\n * @param {Object} basePlaylist The original playlist to clone from.\n */\n\n addClonePathway(clone, basePlaylist = {}) {\n const main = this.main;\n const index = main.playlists.length;\n const uri = this.createCloneURI_(basePlaylist.resolvedUri, clone);\n const playlistId = createPlaylistID(clone.ID, uri);\n const attributes = this.createCloneAttributes_(clone.ID, basePlaylist.attributes);\n const playlist = this.createClonePlaylist_(basePlaylist, playlistId, clone, attributes);\n main.playlists[index] = playlist; // add playlist by ID and URI\n\n main.playlists[playlistId] = playlist;\n main.playlists[uri] = playlist;\n this.createClonedMediaGroups_(clone);\n }\n /**\n * Given a pathway clone object we create clones of all media.\n * In this function, all necessary information and updated playlists\n * are added to the `mediaGroup` object.\n * Playlists are also added to the `playlists` array so the media groups\n * will be properly linked.\n *\n * @param {Object} clone The pathway clone object.\n */\n\n createClonedMediaGroups_(clone) {\n const id = clone.ID;\n const baseID = clone['BASE-ID'];\n const main = this.main;\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(mediaType => {\n // If the media type doesn't exist, or there is already a clone, skip\n // to the next media type.\n if (!main.mediaGroups[mediaType] || main.mediaGroups[mediaType][id]) {\n return;\n }\n for (const groupKey in main.mediaGroups[mediaType]) {\n if (groupKey === baseID) {\n // Create the group.\n main.mediaGroups[mediaType][id] = {};\n } else {\n // There is no need to iterate over label keys in this case.\n continue;\n }\n for (const labelKey in main.mediaGroups[mediaType][groupKey]) 
{\n const oldMedia = main.mediaGroups[mediaType][groupKey][labelKey];\n main.mediaGroups[mediaType][id][labelKey] = _extends({}, oldMedia);\n const newMedia = main.mediaGroups[mediaType][id][labelKey]; // update URIs on the media\n\n const newUri = this.createCloneURI_(oldMedia.resolvedUri, clone);\n newMedia.resolvedUri = newUri;\n newMedia.uri = newUri; // Reset playlists in the new media group.\n\n newMedia.playlists = []; // Create new playlists in the newly cloned media group.\n\n oldMedia.playlists.forEach((p, i) => {\n const oldMediaPlaylist = main.playlists[p.id];\n const group = groupID(mediaType, id, labelKey);\n const newPlaylistID = createPlaylistID(id, group); // Check to see if it already exists\n\n if (oldMediaPlaylist && !main.playlists[newPlaylistID]) {\n const newMediaPlaylist = this.createClonePlaylist_(oldMediaPlaylist, newPlaylistID, clone);\n const newPlaylistUri = newMediaPlaylist.resolvedUri;\n main.playlists[newPlaylistID] = newMediaPlaylist;\n main.playlists[newPlaylistUri] = newMediaPlaylist;\n }\n newMedia.playlists[i] = this.createClonePlaylist_(p, newPlaylistID, clone);\n });\n }\n }\n });\n }\n /**\n * Using the original playlist to be cloned, and the pathway clone object\n * information, we create a new playlist.\n *\n * @param {Object} basePlaylist The original playlist to be cloned from.\n * @param {string} id The desired id of the newly cloned playlist.\n * @param {Object} clone The pathway clone object.\n * @param {Object} attributes An optional object to populate the `attributes` property in the playlist.\n *\n * @return {Object} The combined cloned playlist.\n */\n\n createClonePlaylist_(basePlaylist, id, clone, attributes) {\n const uri = this.createCloneURI_(basePlaylist.resolvedUri, clone);\n const newProps = {\n resolvedUri: uri,\n uri,\n id\n }; // Remove all segments from previous playlist in the clone.\n\n if (basePlaylist.segments) {\n newProps.segments = [];\n }\n if (attributes) {\n newProps.attributes = attributes;\n }\n return merge(basePlaylist, newProps);\n }\n /**\n * Generates an updated URI for a cloned pathway based on the original\n * pathway's URI and the paramaters from the pathway clone object in the\n * content steering server response.\n *\n * @param {string} baseUri URI to be updated in the cloned pathway.\n * @param {Object} clone The pathway clone object.\n *\n * @return {string} The updated URI for the cloned pathway.\n */\n\n createCloneURI_(baseURI, clone) {\n const uri = new URL(baseURI);\n uri.hostname = clone['URI-REPLACEMENT'].HOST;\n const params = clone['URI-REPLACEMENT'].PARAMS; // Add params to the cloned URL.\n\n for (const key of Object.keys(params)) {\n uri.searchParams.set(key, params[key]);\n }\n return uri.href;\n }\n /**\n * Helper function to create the attributes needed for the new clone.\n * This mainly adds the necessary media attributes.\n *\n * @param {string} id The pathway clone object ID.\n * @param {Object} oldAttributes The old attributes to compare to.\n * @return {Object} The new attributes to add to the playlist.\n */\n\n createCloneAttributes_(id, oldAttributes) {\n const attributes = {\n ['PATHWAY-ID']: id\n };\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(mediaType => {\n if (oldAttributes[mediaType]) {\n attributes[mediaType] = id;\n }\n });\n return attributes;\n }\n /**\n * Returns the key ID set from a playlist\n *\n * @param {playlist} playlist to fetch the key ID set from.\n * @return a Set of 32 digit hex strings that represent the unique keyIds for that playlist.\n */\n\n 
getKeyIdSet(playlist) {\n if (playlist.contentProtection) {\n const keyIds = new Set();\n for (const keysystem in playlist.contentProtection) {\n const keyId = playlist.contentProtection[keysystem].attributes.keyId;\n if (keyId) {\n keyIds.add(keyId.toLowerCase());\n }\n }\n return keyIds;\n }\n }\n}\n\n/**\n * @file xhr.js\n */\n\nconst callbackWrapper = function (request, error, response, callback) {\n const reqResponse = request.responseType === 'arraybuffer' ? request.response : request.responseText;\n if (!error && reqResponse) {\n request.responseTime = Date.now();\n request.roundTripTime = request.responseTime - request.requestTime;\n request.bytesReceived = reqResponse.byteLength || reqResponse.length;\n if (!request.bandwidth) {\n request.bandwidth = Math.floor(request.bytesReceived / request.roundTripTime * 8 * 1000);\n }\n }\n if (response.headers) {\n request.responseHeaders = response.headers;\n } // videojs.xhr now uses a specific code on the error\n // object to signal that a request has timed out instead\n // of setting a boolean on the request object\n\n if (error && error.code === 'ETIMEDOUT') {\n request.timedout = true;\n } // videojs.xhr no longer considers status codes outside of 200 and 0\n // (for file uris) to be errors, but the old XHR did, so emulate that\n // behavior. Status 206 may be used in response to byterange requests.\n\n if (!error && !request.aborted && response.statusCode !== 200 && response.statusCode !== 206 && response.statusCode !== 0) {\n error = new Error('XHR Failed with a response of: ' + (request && (reqResponse || request.responseText)));\n }\n callback(error, request);\n};\n/**\n * Iterates over the request hooks Set and calls them in order\n *\n * @param {Set} hooks the hook Set to iterate over\n * @param {Object} options the request options to pass to the xhr wrapper\n * @return the callback hook function return value, the modified or new options Object.\n */\n\nconst callAllRequestHooks = (requestSet, options) => {\n if (!requestSet || !requestSet.size) {\n return;\n }\n let newOptions = options;\n requestSet.forEach(requestCallback => {\n newOptions = requestCallback(newOptions);\n });\n return newOptions;\n};\n/**\n * Iterates over the response hooks Set and calls them in order.\n *\n * @param {Set} hooks the hook Set to iterate over\n * @param {Object} request the xhr request object\n * @param {Object} error the xhr error object\n * @param {Object} response the xhr response object\n */\n\nconst callAllResponseHooks = (responseSet, request, error, response) => {\n if (!responseSet || !responseSet.size) {\n return;\n }\n responseSet.forEach(responseCallback => {\n responseCallback(request, error, response);\n });\n};\nconst xhrFactory = function () {\n const xhr = function XhrFunction(options, callback) {\n // Add a default timeout\n options = merge({\n timeout: 45e3\n }, options); // Allow an optional user-specified function to modify the option\n // object before we construct the xhr request\n // TODO: Remove beforeRequest in the next major release.\n\n const beforeRequest = XhrFunction.beforeRequest || videojs.Vhs.xhr.beforeRequest; // onRequest and onResponse hooks as a Set, at either the player or global level.\n // TODO: new Set added here for beforeRequest alias. 
Remove this when beforeRequest is removed.\n\n const _requestCallbackSet = XhrFunction._requestCallbackSet || videojs.Vhs.xhr._requestCallbackSet || new Set();\n const _responseCallbackSet = XhrFunction._responseCallbackSet || videojs.Vhs.xhr._responseCallbackSet;\n if (beforeRequest && typeof beforeRequest === 'function') {\n videojs.log.warn('beforeRequest is deprecated, use onRequest instead.');\n _requestCallbackSet.add(beforeRequest);\n } // Use the standard videojs.xhr() method unless `videojs.Vhs.xhr` has been overriden\n // TODO: switch back to videojs.Vhs.xhr.name === 'XhrFunction' when we drop IE11\n\n const xhrMethod = videojs.Vhs.xhr.original === true ? videojs.xhr : videojs.Vhs.xhr; // call all registered onRequest hooks, assign new options.\n\n const beforeRequestOptions = callAllRequestHooks(_requestCallbackSet, options); // Remove the beforeRequest function from the hooks set so stale beforeRequest functions are not called.\n\n _requestCallbackSet.delete(beforeRequest); // xhrMethod will call XMLHttpRequest.open and XMLHttpRequest.send\n\n const request = xhrMethod(beforeRequestOptions || options, function (error, response) {\n // call all registered onResponse hooks\n callAllResponseHooks(_responseCallbackSet, request, error, response);\n return callbackWrapper(request, error, response, callback);\n });\n const originalAbort = request.abort;\n request.abort = function () {\n request.aborted = true;\n return originalAbort.apply(request, arguments);\n };\n request.uri = options.uri;\n request.requestType = options.requestType;\n request.requestTime = Date.now();\n return request;\n };\n xhr.original = true;\n return xhr;\n};\n/**\n * Turns segment byterange into a string suitable for use in\n * HTTP Range requests\n *\n * @param {Object} byterange - an object with two values defining the start and end\n * of a byte-range\n */\n\nconst byterangeStr = function (byterange) {\n // `byterangeEnd` is one less than `offset + length` because the HTTP range\n // header uses inclusive ranges\n let byterangeEnd;\n const byterangeStart = byterange.offset;\n if (typeof byterange.offset === 'bigint' || typeof byterange.length === 'bigint') {\n byterangeEnd = window$1.BigInt(byterange.offset) + window$1.BigInt(byterange.length) - window$1.BigInt(1);\n } else {\n byterangeEnd = byterange.offset + byterange.length - 1;\n }\n return 'bytes=' + byterangeStart + '-' + byterangeEnd;\n};\n/**\n * Defines headers for use in the xhr request for a particular segment.\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n */\n\nconst segmentXhrHeaders = function (segment) {\n const headers = {};\n if (segment.byterange) {\n headers.Range = byterangeStr(segment.byterange);\n }\n return headers;\n};\n\n/**\n * @file bin-utils.js\n */\n\n/**\n * convert a TimeRange to text\n *\n * @param {TimeRange} range the timerange to use for conversion\n * @param {number} i the iterator on the range to convert\n * @return {string} the range in string format\n */\n\nconst textRange = function (range, i) {\n return range.start(i) + '-' + range.end(i);\n};\n/**\n * format a number as hex string\n *\n * @param {number} e The number\n * @param {number} i the iterator\n * @return {string} the hex formatted number as a string\n */\n\nconst formatHexString = function (e, i) {\n const value = e.toString(16);\n return '00'.substring(0, 2 - value.length) + value + (i % 2 ? 
' ' : '');\n};\nconst formatAsciiString = function (e) {\n if (e >= 0x20 && e < 0x7e) {\n return String.fromCharCode(e);\n }\n return '.';\n};\n/**\n * Creates an object for sending to a web worker modifying properties that are TypedArrays\n * into a new object with seperated properties for the buffer, byteOffset, and byteLength.\n *\n * @param {Object} message\n * Object of properties and values to send to the web worker\n * @return {Object}\n * Modified message with TypedArray values expanded\n * @function createTransferableMessage\n */\n\nconst createTransferableMessage = function (message) {\n const transferable = {};\n Object.keys(message).forEach(key => {\n const value = message[key];\n if (isArrayBufferView(value)) {\n transferable[key] = {\n bytes: value.buffer,\n byteOffset: value.byteOffset,\n byteLength: value.byteLength\n };\n } else {\n transferable[key] = value;\n }\n });\n return transferable;\n};\n/**\n * Returns a unique string identifier for a media initialization\n * segment.\n *\n * @param {Object} initSegment\n * the init segment object.\n *\n * @return {string} the generated init segment id\n */\n\nconst initSegmentId = function (initSegment) {\n const byterange = initSegment.byterange || {\n length: Infinity,\n offset: 0\n };\n return [byterange.length, byterange.offset, initSegment.resolvedUri].join(',');\n};\n/**\n * Returns a unique string identifier for a media segment key.\n *\n * @param {Object} key the encryption key\n * @return {string} the unique id for the media segment key.\n */\n\nconst segmentKeyId = function (key) {\n return key.resolvedUri;\n};\n/**\n * utils to help dump binary data to the console\n *\n * @param {Array|TypedArray} data\n * data to dump to a string\n *\n * @return {string} the data as a hex string.\n */\n\nconst hexDump = data => {\n const bytes = Array.prototype.slice.call(data);\n const step = 16;\n let result = '';\n let hex;\n let ascii;\n for (let j = 0; j < bytes.length / step; j++) {\n hex = bytes.slice(j * step, j * step + step).map(formatHexString).join('');\n ascii = bytes.slice(j * step, j * step + step).map(formatAsciiString).join('');\n result += hex + ' ' + ascii + '\\n';\n }\n return result;\n};\nconst tagDump = ({\n bytes\n}) => hexDump(bytes);\nconst textRanges = ranges => {\n let result = '';\n let i;\n for (i = 0; i < ranges.length; i++) {\n result += textRange(ranges, i) + ' ';\n }\n return result;\n};\nvar utils = /*#__PURE__*/Object.freeze({\n __proto__: null,\n createTransferableMessage: createTransferableMessage,\n initSegmentId: initSegmentId,\n segmentKeyId: segmentKeyId,\n hexDump: hexDump,\n tagDump: tagDump,\n textRanges: textRanges\n});\n\n// TODO handle fmp4 case where the timing info is accurate and doesn't involve transmux\n// 25% was arbitrarily chosen, and may need to be refined over time.\n\nconst SEGMENT_END_FUDGE_PERCENT = 0.25;\n/**\n * Converts a player time (any time that can be gotten/set from player.currentTime(),\n * e.g., any time within player.seekable().start(0) to player.seekable().end(0)) to a\n * program time (any time referencing the real world (e.g., EXT-X-PROGRAM-DATE-TIME)).\n *\n * The containing segment is required as the EXT-X-PROGRAM-DATE-TIME serves as an \"anchor\n * point\" (a point where we have a mapping from program time to player time, with player\n * time being the post transmux start of the segment).\n *\n * For more details, see [this doc](../../docs/program-time-from-player-time.md).\n *\n * @param {number} playerTime the player time\n * @param {Object} segment the 
segment which contains the player time\n * @return {Date} program time\n */\n\nconst playerTimeToProgramTime = (playerTime, segment) => {\n if (!segment.dateTimeObject) {\n // Can't convert without an \"anchor point\" for the program time (i.e., a time that can\n // be used to map the start of a segment with a real world time).\n return null;\n }\n const transmuxerPrependedSeconds = segment.videoTimingInfo.transmuxerPrependedSeconds;\n const transmuxedStart = segment.videoTimingInfo.transmuxedPresentationStart; // get the start of the content from before old content is prepended\n\n const startOfSegment = transmuxedStart + transmuxerPrependedSeconds;\n const offsetFromSegmentStart = playerTime - startOfSegment;\n return new Date(segment.dateTimeObject.getTime() + offsetFromSegmentStart * 1000);\n};\nconst originalSegmentVideoDuration = videoTimingInfo => {\n return videoTimingInfo.transmuxedPresentationEnd - videoTimingInfo.transmuxedPresentationStart - videoTimingInfo.transmuxerPrependedSeconds;\n};\n/**\n * Finds a segment that contains the time requested given as an ISO-8601 string. The\n * returned segment might be an estimate or an accurate match.\n *\n * @param {string} programTime The ISO-8601 programTime to find a match for\n * @param {Object} playlist A playlist object to search within\n */\n\nconst findSegmentForProgramTime = (programTime, playlist) => {\n // Assumptions:\n // - verifyProgramDateTimeTags has already been run\n // - live streams have been started\n let dateTimeObject;\n try {\n dateTimeObject = new Date(programTime);\n } catch (e) {\n return null;\n }\n if (!playlist || !playlist.segments || playlist.segments.length === 0) {\n return null;\n }\n let segment = playlist.segments[0];\n if (dateTimeObject < new Date(segment.dateTimeObject)) {\n // Requested time is before stream start.\n return null;\n }\n for (let i = 0; i < playlist.segments.length - 1; i++) {\n segment = playlist.segments[i];\n const nextSegmentStart = new Date(playlist.segments[i + 1].dateTimeObject);\n if (dateTimeObject < nextSegmentStart) {\n break;\n }\n }\n const lastSegment = playlist.segments[playlist.segments.length - 1];\n const lastSegmentStart = lastSegment.dateTimeObject;\n const lastSegmentDuration = lastSegment.videoTimingInfo ? originalSegmentVideoDuration(lastSegment.videoTimingInfo) : lastSegment.duration + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT;\n const lastSegmentEnd = new Date(lastSegmentStart.getTime() + lastSegmentDuration * 1000);\n if (dateTimeObject > lastSegmentEnd) {\n // Beyond the end of the stream, or our best guess of the end of the stream.\n return null;\n }\n if (dateTimeObject > new Date(lastSegmentStart)) {\n segment = lastSegment;\n }\n return {\n segment,\n estimatedStart: segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationStart : Playlist.duration(playlist, playlist.mediaSequence + playlist.segments.indexOf(segment)),\n // Although, given that all segments have accurate date time objects, the segment\n // selected should be accurate, unless the video has been transmuxed at some point\n // (determined by the presence of the videoTimingInfo object), the segment's \"player\n // time\" (the start time in the player) can't be considered accurate.\n type: segment.videoTimingInfo ? 
'accurate' : 'estimate'\n };\n};\n/**\n * Finds a segment that contains the given player time(in seconds).\n *\n * @param {number} time The player time to find a match for\n * @param {Object} playlist A playlist object to search within\n */\n\nconst findSegmentForPlayerTime = (time, playlist) => {\n // Assumptions:\n // - there will always be a segment.duration\n // - we can start from zero\n // - segments are in time order\n if (!playlist || !playlist.segments || playlist.segments.length === 0) {\n return null;\n }\n let segmentEnd = 0;\n let segment;\n for (let i = 0; i < playlist.segments.length; i++) {\n segment = playlist.segments[i]; // videoTimingInfo is set after the segment is downloaded and transmuxed, and\n // should contain the most accurate values we have for the segment's player times.\n //\n // Use the accurate transmuxedPresentationEnd value if it is available, otherwise fall\n // back to an estimate based on the manifest derived (inaccurate) segment.duration, to\n // calculate an end value.\n\n segmentEnd = segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationEnd : segmentEnd + segment.duration;\n if (time <= segmentEnd) {\n break;\n }\n }\n const lastSegment = playlist.segments[playlist.segments.length - 1];\n if (lastSegment.videoTimingInfo && lastSegment.videoTimingInfo.transmuxedPresentationEnd < time) {\n // The time requested is beyond the stream end.\n return null;\n }\n if (time > segmentEnd) {\n // The time is within or beyond the last segment.\n //\n // Check to see if the time is beyond a reasonable guess of the end of the stream.\n if (time > segmentEnd + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT) {\n // Technically, because the duration value is only an estimate, the time may still\n // exist in the last segment, however, there isn't enough information to make even\n // a reasonable estimate.\n return null;\n }\n segment = lastSegment;\n }\n return {\n segment,\n estimatedStart: segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationStart : segmentEnd - segment.duration,\n // Because videoTimingInfo is only set after transmux, it is the only way to get\n // accurate timing values.\n type: segment.videoTimingInfo ? 
'accurate' : 'estimate'\n };\n};\n/**\n * Gives the offset of the comparisonTimestamp from the programTime timestamp in seconds.\n * If the offset returned is positive, the programTime occurs after the\n * comparisonTimestamp.\n * If the offset is negative, the programTime occurs before the comparisonTimestamp.\n *\n * @param {string} comparisonTimeStamp An ISO-8601 timestamp to compare against\n * @param {string} programTime The programTime as an ISO-8601 string\n * @return {number} offset\n */\n\nconst getOffsetFromTimestamp = (comparisonTimeStamp, programTime) => {\n let segmentDateTime;\n let programDateTime;\n try {\n segmentDateTime = new Date(comparisonTimeStamp);\n programDateTime = new Date(programTime);\n } catch (e) {// TODO handle error\n }\n const segmentTimeEpoch = segmentDateTime.getTime();\n const programTimeEpoch = programDateTime.getTime();\n return (programTimeEpoch - segmentTimeEpoch) / 1000;\n};\n/**\n * Checks that all segments in this playlist have programDateTime tags.\n *\n * @param {Object} playlist A playlist object\n */\n\nconst verifyProgramDateTimeTags = playlist => {\n if (!playlist.segments || playlist.segments.length === 0) {\n return false;\n }\n for (let i = 0; i < playlist.segments.length; i++) {\n const segment = playlist.segments[i];\n if (!segment.dateTimeObject) {\n return false;\n }\n }\n return true;\n};\n/**\n * Returns the programTime of the media given a playlist and a playerTime.\n * The playlist must have programDateTime tags for a programDateTime tag to be returned.\n * If the segments containing the time requested have not been buffered yet, an estimate\n * may be returned to the callback.\n *\n * @param {Object} args\n * @param {Object} args.playlist A playlist object to search within\n * @param {number} time A playerTime in seconds\n * @param {Function} callback(err, programTime)\n * @return {string} err.message A detailed error message\n * @return {Object} programTime\n * @return {number} programTime.mediaSeconds The streamTime in seconds\n * @return {string} programTime.programDateTime The programTime as an ISO-8601 String\n */\n\nconst getProgramTime = ({\n playlist,\n time = undefined,\n callback\n}) => {\n if (!callback) {\n throw new Error('getProgramTime: callback must be provided');\n }\n if (!playlist || time === undefined) {\n return callback({\n message: 'getProgramTime: playlist and time must be provided'\n });\n }\n const matchedSegment = findSegmentForPlayerTime(time, playlist);\n if (!matchedSegment) {\n return callback({\n message: 'valid programTime was not found'\n });\n }\n if (matchedSegment.type === 'estimate') {\n return callback({\n message: 'Accurate programTime could not be determined.' + ' Please seek to e.seekTime and try again',\n seekTime: matchedSegment.estimatedStart\n });\n }\n const programTimeObject = {\n mediaSeconds: time\n };\n const programTime = playerTimeToProgramTime(time, matchedSegment.segment);\n if (programTime) {\n programTimeObject.programDateTime = programTime.toISOString();\n }\n return callback(null, programTimeObject);\n};\n/**\n * Seeks in the player to a time that matches the given programTime ISO-8601 string.\n *\n * @param {Object} args\n * @param {string} args.programTime A programTime to seek to as an ISO-8601 String\n * @param {Object} args.playlist A playlist to look within\n * @param {number} args.retryCount The number of times to try for an accurate seek. 
Default is 2.\n * @param {Function} args.seekTo A method to perform a seek\n * @param {boolean} args.pauseAfterSeek Whether to end in a paused state after seeking. Default is true.\n * @param {Object} args.tech The tech to seek on\n * @param {Function} args.callback(err, newTime) A callback to return the new time to\n * @return {string} err.message A detailed error message\n * @return {number} newTime The exact time that was seeked to in seconds\n */\n\nconst seekToProgramTime = ({\n programTime,\n playlist,\n retryCount = 2,\n seekTo,\n pauseAfterSeek = true,\n tech,\n callback\n}) => {\n if (!callback) {\n throw new Error('seekToProgramTime: callback must be provided');\n }\n if (typeof programTime === 'undefined' || !playlist || !seekTo) {\n return callback({\n message: 'seekToProgramTime: programTime, seekTo and playlist must be provided'\n });\n }\n if (!playlist.endList && !tech.hasStarted_) {\n return callback({\n message: 'player must be playing a live stream to start buffering'\n });\n }\n if (!verifyProgramDateTimeTags(playlist)) {\n return callback({\n message: 'programDateTime tags must be provided in the manifest ' + playlist.resolvedUri\n });\n }\n const matchedSegment = findSegmentForProgramTime(programTime, playlist); // no match\n\n if (!matchedSegment) {\n return callback({\n message: `${programTime} was not found in the stream`\n });\n }\n const segment = matchedSegment.segment;\n const mediaOffset = getOffsetFromTimestamp(segment.dateTimeObject, programTime);\n if (matchedSegment.type === 'estimate') {\n // we've run out of retries\n if (retryCount === 0) {\n return callback({\n message: `${programTime} is not buffered yet. Try again`\n });\n }\n seekTo(matchedSegment.estimatedStart + mediaOffset);\n tech.one('seeked', () => {\n seekToProgramTime({\n programTime,\n playlist,\n retryCount: retryCount - 1,\n seekTo,\n pauseAfterSeek,\n tech,\n callback\n });\n });\n return;\n } // Since the segment.start value is determined from the buffered end or ending time\n // of the prior segment, the seekToTime doesn't need to account for any transmuxer\n // modifications.\n\n const seekToTime = segment.start + mediaOffset;\n const seekedCallback = () => {\n return callback(null, tech.currentTime());\n }; // listen for seeked event\n\n tech.one('seeked', seekedCallback); // pause before seeking as video.js will restore this state\n\n if (pauseAfterSeek) {\n tech.pause();\n }\n seekTo(seekToTime);\n};\n\n// which will only happen if the request is complete.\n\nconst callbackOnCompleted = (request, cb) => {\n if (request.readyState === 4) {\n return cb();\n }\n return;\n};\nconst containerRequest = (uri, xhr, cb, requestType) => {\n let bytes = [];\n let id3Offset;\n let finished = false;\n const endRequestAndCallback = function (err, req, type, _bytes) {\n req.abort();\n finished = true;\n return cb(err, req, type, _bytes);\n };\n const progressListener = function (error, request) {\n if (finished) {\n return;\n }\n if (error) {\n error.metadata = getStreamingNetworkErrorMetadata({\n requestType,\n request,\n error\n });\n return endRequestAndCallback(error, request, '', bytes);\n } // grap the new part of content that was just downloaded\n\n const newPart = request.responseText.substring(bytes && bytes.byteLength || 0, request.responseText.length); // add that onto bytes\n\n bytes = concatTypedArrays(bytes, stringToBytes(newPart, true));\n id3Offset = id3Offset || getId3Offset(bytes); // we need at least 10 bytes to determine a type\n // or we need at least two bytes after an 
id3Offset\n\n if (bytes.length < 10 || id3Offset && bytes.length < id3Offset + 2) {\n return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));\n }\n const type = detectContainerForBytes(bytes); // if this looks like a ts segment but we don't have enough data\n // to see the second sync byte, wait until we have enough data\n // before declaring it ts\n\n if (type === 'ts' && bytes.length < 188) {\n return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));\n } // this may be an unsynced ts segment\n // wait for 376 bytes before detecting no container\n\n if (!type && bytes.length < 376) {\n return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));\n }\n return endRequestAndCallback(null, request, type, bytes);\n };\n const options = {\n uri,\n beforeSend(request) {\n // this forces the browser to pass the bytes to us unprocessed\n request.overrideMimeType('text/plain; charset=x-user-defined');\n request.addEventListener('progress', function ({\n total,\n loaded\n }) {\n return callbackWrapper(request, null, {\n statusCode: request.status\n }, progressListener);\n });\n }\n };\n const request = xhr(options, function (error, response) {\n return callbackWrapper(request, error, response, progressListener);\n });\n return request;\n};\nconst {\n EventTarget\n} = videojs;\nconst dashPlaylistUnchanged = function (a, b) {\n if (!isPlaylistUnchanged(a, b)) {\n return false;\n } // for dash the above check will often return true in scenarios where\n // the playlist actually has changed because mediaSequence isn't a\n // dash thing, and we often set it to 1. So if the playlists have the same amount\n // of segments we return true.\n // So for dash we need to make sure that the underlying segments are different.\n // if sidx changed then the playlists are different.\n\n if (a.sidx && b.sidx && (a.sidx.offset !== b.sidx.offset || a.sidx.length !== b.sidx.length)) {\n return false;\n } else if (!a.sidx && b.sidx || a.sidx && !b.sidx) {\n return false;\n } // one or the other does not have segments\n // there was a change.\n\n if (a.segments && !b.segments || !a.segments && b.segments) {\n return false;\n } // neither has segments nothing changed\n\n if (!a.segments && !b.segments) {\n return true;\n } // check segments themselves\n\n for (let i = 0; i < a.segments.length; i++) {\n const aSegment = a.segments[i];\n const bSegment = b.segments[i]; // if uris are different between segments there was a change\n\n if (aSegment.uri !== bSegment.uri) {\n return false;\n } // neither segment has a byterange, there will be no byterange change.\n\n if (!aSegment.byterange && !bSegment.byterange) {\n continue;\n }\n const aByterange = aSegment.byterange;\n const bByterange = bSegment.byterange; // if byterange only exists on one of the segments, there was a change.\n\n if (aByterange && !bByterange || !aByterange && bByterange) {\n return false;\n } // if both segments have byterange with different offsets, there was a change.\n\n if (aByterange.offset !== bByterange.offset || aByterange.length !== bByterange.length) {\n return false;\n }\n } // if everything was the same with segments, this is the same playlist.\n\n return true;\n};\n/**\n * Use the representation IDs from the mpd object to create groupIDs, the NAME is set to mandatory representation\n * ID in the parser. This allows for continuous playout across periods with the same representation IDs\n * (continuous periods as defined in DASH-IF 3.2.12). 
This is assumed in the mpd-parser as well. If we want to support\n * periods without continuous playback this function may need modification as well as the parser.\n */\n\nconst dashGroupId = (type, group, label, playlist) => {\n // If the manifest somehow does not have an ID (non-dash compliant), use the label.\n const playlistId = playlist.attributes.NAME || label;\n return `placeholder-uri-${type}-${group}-${playlistId}`;\n};\n/**\n * Parses the main XML string and updates playlist URI references.\n *\n * @param {Object} config\n * Object of arguments\n * @param {string} config.mainXml\n * The mpd XML\n * @param {string} config.srcUrl\n * The mpd URL\n * @param {Date} config.clientOffset\n * A time difference between server and client\n * @param {Object} config.sidxMapping\n * SIDX mappings for moof/mdat URIs and byte ranges\n * @return {Object}\n * The parsed mpd manifest object\n */\n\nconst parseMainXml = ({\n mainXml,\n srcUrl,\n clientOffset,\n sidxMapping,\n previousManifest\n}) => {\n const manifest = parse(mainXml, {\n manifestUri: srcUrl,\n clientOffset,\n sidxMapping,\n previousManifest\n });\n addPropertiesToMain(manifest, srcUrl, dashGroupId);\n return manifest;\n};\n/**\n * Removes any mediaGroup labels that no longer exist in the newMain\n *\n * @param {Object} update\n * The previous mpd object being updated\n * @param {Object} newMain\n * The new mpd object\n */\n\nconst removeOldMediaGroupLabels = (update, newMain) => {\n forEachMediaGroup(update, (properties, type, group, label) => {\n if (!(label in newMain.mediaGroups[type][group])) {\n delete update.mediaGroups[type][group][label];\n }\n });\n};\n/**\n * Returns a new main manifest that is the result of merging an updated main manifest\n * into the original version.\n *\n * @param {Object} oldMain\n * The old parsed mpd object\n * @param {Object} newMain\n * The updated parsed mpd object\n * @return {Object}\n * A new object representing the original main manifest with the updated media\n * playlists merged in\n */\n\nconst updateMain = (oldMain, newMain, sidxMapping) => {\n let noChanges = true;\n let update = merge(oldMain, {\n // These are top level properties that can be updated\n duration: newMain.duration,\n minimumUpdatePeriod: newMain.minimumUpdatePeriod,\n timelineStarts: newMain.timelineStarts\n }); // First update the playlists in playlist list\n\n for (let i = 0; i < newMain.playlists.length; i++) {\n const playlist = newMain.playlists[i];\n if (playlist.sidx) {\n const sidxKey = generateSidxKey(playlist.sidx); // add sidx segments to the playlist if we have all the sidx info already\n\n if (sidxMapping && sidxMapping[sidxKey] && sidxMapping[sidxKey].sidx) {\n addSidxSegmentsToPlaylist(playlist, sidxMapping[sidxKey].sidx, playlist.sidx.resolvedUri);\n }\n }\n const playlistUpdate = updateMain$1(update, playlist, dashPlaylistUnchanged);\n if (playlistUpdate) {\n update = playlistUpdate;\n noChanges = false;\n }\n } // Then update media group playlists\n\n forEachMediaGroup(newMain, (properties, type, group, label) => {\n if (properties.playlists && properties.playlists.length) {\n const id = properties.playlists[0].id;\n const playlistUpdate = updateMain$1(update, properties.playlists[0], dashPlaylistUnchanged);\n if (playlistUpdate) {\n update = playlistUpdate; // add new mediaGroup label if it doesn't exist and assign the new mediaGroup.\n\n if (!(label in update.mediaGroups[type][group])) {\n update.mediaGroups[type][group][label] = properties;\n } // update the playlist reference within media 
groups\n\n update.mediaGroups[type][group][label].playlists[0] = update.playlists[id];\n noChanges = false;\n }\n }\n }); // remove mediaGroup labels and references that no longer exist in the newMain\n\n removeOldMediaGroupLabels(update, newMain);\n if (newMain.minimumUpdatePeriod !== oldMain.minimumUpdatePeriod) {\n noChanges = false;\n }\n if (noChanges) {\n return null;\n }\n return update;\n}; // SIDX should be equivalent if the URI and byteranges of the SIDX match.\n// If the SIDXs have maps, the two maps should match,\n// both `a` and `b` missing SIDXs is considered matching.\n// If `a` or `b` but not both have a map, they aren't matching.\n\nconst equivalentSidx = (a, b) => {\n const neitherMap = Boolean(!a.map && !b.map);\n const equivalentMap = neitherMap || Boolean(a.map && b.map && a.map.byterange.offset === b.map.byterange.offset && a.map.byterange.length === b.map.byterange.length);\n return equivalentMap && a.uri === b.uri && a.byterange.offset === b.byterange.offset && a.byterange.length === b.byterange.length;\n}; // exported for testing\n\nconst compareSidxEntry = (playlists, oldSidxMapping) => {\n const newSidxMapping = {};\n for (const id in playlists) {\n const playlist = playlists[id];\n const currentSidxInfo = playlist.sidx;\n if (currentSidxInfo) {\n const key = generateSidxKey(currentSidxInfo);\n if (!oldSidxMapping[key]) {\n break;\n }\n const savedSidxInfo = oldSidxMapping[key].sidxInfo;\n if (equivalentSidx(savedSidxInfo, currentSidxInfo)) {\n newSidxMapping[key] = oldSidxMapping[key];\n }\n }\n }\n return newSidxMapping;\n};\n/**\n * A function that filters out changed items as they need to be requested separately.\n *\n * The method is exported for testing\n *\n * @param {Object} main the parsed mpd XML returned via mpd-parser\n * @param {Object} oldSidxMapping the SIDX to compare against\n */\n\nconst filterChangedSidxMappings = (main, oldSidxMapping) => {\n const videoSidx = compareSidxEntry(main.playlists, oldSidxMapping);\n let mediaGroupSidx = videoSidx;\n forEachMediaGroup(main, (properties, mediaType, groupKey, labelKey) => {\n if (properties.playlists && properties.playlists.length) {\n const playlists = properties.playlists;\n mediaGroupSidx = merge(mediaGroupSidx, compareSidxEntry(playlists, oldSidxMapping));\n }\n });\n return mediaGroupSidx;\n};\nclass DashPlaylistLoader extends EventTarget {\n // DashPlaylistLoader must accept either a src url or a playlist because subsequent\n // playlist loader setups from media groups will expect to be able to pass a playlist\n // (since there aren't external URLs to media playlists with DASH)\n constructor(srcUrlOrPlaylist, vhs, options = {}, mainPlaylistLoader) {\n super();\n this.mainPlaylistLoader_ = mainPlaylistLoader || this;\n if (!mainPlaylistLoader) {\n this.isMain_ = true;\n }\n const {\n withCredentials = false\n } = options;\n this.vhs_ = vhs;\n this.withCredentials = withCredentials;\n this.addMetadataToTextTrack = options.addMetadataToTextTrack;\n if (!srcUrlOrPlaylist) {\n throw new Error('A non-empty playlist URL or object is required');\n } // event naming?\n\n this.on('minimumUpdatePeriod', () => {\n this.refreshXml_();\n }); // live playlist staleness timeout\n\n this.on('mediaupdatetimeout', () => {\n this.refreshMedia_(this.media().id);\n });\n this.state = 'HAVE_NOTHING';\n this.loadedPlaylists_ = {};\n this.logger_ = logger('DashPlaylistLoader'); // initialize the loader state\n // The mainPlaylistLoader will be created with a string\n\n if (this.isMain_) {\n 
this.mainPlaylistLoader_.srcUrl = srcUrlOrPlaylist; // TODO: reset sidxMapping between period changes\n // once multi-period is refactored\n\n this.mainPlaylistLoader_.sidxMapping_ = {};\n } else {\n this.childPlaylist_ = srcUrlOrPlaylist;\n }\n }\n requestErrored_(err, request, startingState) {\n // disposed\n if (!this.request) {\n return true;\n } // pending request is cleared\n\n this.request = null;\n if (err) {\n // use the provided error object or create one\n // based on the request/response\n this.error = typeof err === 'object' && !(err instanceof Error) ? err : {\n status: request.status,\n message: 'DASH request error at URL: ' + request.uri,\n response: request.response,\n // MEDIA_ERR_NETWORK\n code: 2,\n metadata: err.metadata\n };\n if (startingState) {\n this.state = startingState;\n }\n this.trigger('error');\n return true;\n }\n }\n /**\n * Verify that the container of the sidx segment can be parsed\n * and if it can, get and parse that segment.\n */\n\n addSidxSegments_(playlist, startingState, cb) {\n const sidxKey = playlist.sidx && generateSidxKey(playlist.sidx); // playlist lacks sidx or sidx segments were added to this playlist already.\n\n if (!playlist.sidx || !sidxKey || this.mainPlaylistLoader_.sidxMapping_[sidxKey]) {\n // keep this function async\n this.mediaRequest_ = window$1.setTimeout(() => cb(false), 0);\n return;\n } // resolve the segment URL relative to the playlist\n\n const uri = resolveManifestRedirect(playlist.sidx.resolvedUri);\n const fin = (err, request) => {\n if (this.requestErrored_(err, request, startingState)) {\n return;\n }\n const sidxMapping = this.mainPlaylistLoader_.sidxMapping_;\n const {\n requestType\n } = request;\n let sidx;\n try {\n sidx = parseSidx(toUint8(request.response).subarray(8));\n } catch (e) {\n e.metadata = getStreamingNetworkErrorMetadata({\n requestType,\n request,\n parseFailure: true\n }); // sidx parsing failed.\n\n this.requestErrored_(e, request, startingState);\n return;\n }\n sidxMapping[sidxKey] = {\n sidxInfo: playlist.sidx,\n sidx\n };\n addSidxSegmentsToPlaylist(playlist, sidx, playlist.sidx.resolvedUri);\n return cb(true);\n };\n const REQUEST_TYPE = 'dash-sidx';\n this.request = containerRequest(uri, this.vhs_.xhr, (err, request, container, bytes) => {\n if (err) {\n return fin(err, request);\n }\n if (!container || container !== 'mp4') {\n const sidxContainer = container || 'unknown';\n return fin({\n status: request.status,\n message: `Unsupported ${sidxContainer} container type for sidx segment at URL: ${uri}`,\n // response is just bytes in this case\n // but we really don't want to return that.\n response: '',\n playlist,\n internal: true,\n playlistExclusionDuration: Infinity,\n // MEDIA_ERR_NETWORK\n code: 2\n }, request);\n } // if we already downloaded the sidx bytes in the container request, use them\n\n const {\n offset,\n length\n } = playlist.sidx.byterange;\n if (bytes.length >= length + offset) {\n return fin(err, {\n response: bytes.subarray(offset, offset + length),\n status: request.status,\n uri: request.uri\n });\n } // otherwise request sidx bytes\n\n this.request = this.vhs_.xhr({\n uri,\n responseType: 'arraybuffer',\n requestType: 'dash-sidx',\n headers: segmentXhrHeaders({\n byterange: playlist.sidx.byterange\n })\n }, fin);\n }, REQUEST_TYPE);\n }\n dispose() {\n this.trigger('dispose');\n this.stopRequest();\n this.loadedPlaylists_ = {};\n window$1.clearTimeout(this.minimumUpdatePeriodTimeout_);\n window$1.clearTimeout(this.mediaRequest_);\n 
window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n this.mediaRequest_ = null;\n this.minimumUpdatePeriodTimeout_ = null;\n if (this.mainPlaylistLoader_.createMupOnMedia_) {\n this.off('loadedmetadata', this.mainPlaylistLoader_.createMupOnMedia_);\n this.mainPlaylistLoader_.createMupOnMedia_ = null;\n }\n this.off();\n }\n hasPendingRequest() {\n return this.request || this.mediaRequest_;\n }\n stopRequest() {\n if (this.request) {\n const oldRequest = this.request;\n this.request = null;\n oldRequest.onreadystatechange = null;\n oldRequest.abort();\n }\n }\n media(playlist) {\n // getter\n if (!playlist) {\n return this.media_;\n } // setter\n\n if (this.state === 'HAVE_NOTHING') {\n throw new Error('Cannot switch media playlist from ' + this.state);\n }\n const startingState = this.state; // find the playlist object if the target playlist has been specified by URI\n\n if (typeof playlist === 'string') {\n if (!this.mainPlaylistLoader_.main.playlists[playlist]) {\n throw new Error('Unknown playlist URI: ' + playlist);\n }\n playlist = this.mainPlaylistLoader_.main.playlists[playlist];\n }\n const mediaChange = !this.media_ || playlist.id !== this.media_.id; // switch to previously loaded playlists immediately\n\n if (mediaChange && this.loadedPlaylists_[playlist.id] && this.loadedPlaylists_[playlist.id].endList) {\n this.state = 'HAVE_METADATA';\n this.media_ = playlist; // trigger media change if the active media has been updated\n\n if (mediaChange) {\n this.trigger('mediachanging');\n this.trigger('mediachange');\n }\n return;\n } // switching to the active playlist is a no-op\n\n if (!mediaChange) {\n return;\n } // switching from an already loaded playlist\n\n if (this.media_) {\n this.trigger('mediachanging');\n }\n this.addSidxSegments_(playlist, startingState, sidxChanged => {\n // everything is ready just continue to haveMetadata\n this.haveMetadata({\n startingState,\n playlist\n });\n });\n }\n haveMetadata({\n startingState,\n playlist\n }) {\n this.state = 'HAVE_METADATA';\n this.loadedPlaylists_[playlist.id] = playlist;\n this.mediaRequest_ = null; // This will trigger loadedplaylist\n\n this.refreshMedia_(playlist.id); // fire loadedmetadata the first time a media playlist is loaded\n // to resolve setup of media groups\n\n if (startingState === 'HAVE_MAIN_MANIFEST') {\n this.trigger('loadedmetadata');\n } else {\n // trigger media change if the active media has been updated\n this.trigger('mediachange');\n }\n }\n pause() {\n if (this.mainPlaylistLoader_.createMupOnMedia_) {\n this.off('loadedmetadata', this.mainPlaylistLoader_.createMupOnMedia_);\n this.mainPlaylistLoader_.createMupOnMedia_ = null;\n }\n this.stopRequest();\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n if (this.isMain_) {\n window$1.clearTimeout(this.mainPlaylistLoader_.minimumUpdatePeriodTimeout_);\n this.mainPlaylistLoader_.minimumUpdatePeriodTimeout_ = null;\n }\n if (this.state === 'HAVE_NOTHING') {\n // If we pause the loader before any data has been retrieved, its as if we never\n // started, so reset to an unstarted state.\n this.started = false;\n }\n }\n load(isFinalRendition) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n const media = this.media();\n if (isFinalRendition) {\n const delay = media ? 
media.targetDuration / 2 * 1000 : 5 * 1000;\n this.mediaUpdateTimeout = window$1.setTimeout(() => this.load(), delay);\n return;\n } // because the playlists are internal to the manifest, load should either load the\n // main manifest, or do nothing but trigger an event\n\n if (!this.started) {\n this.start();\n return;\n }\n if (media && !media.endList) {\n // Check to see if this is the main loader and the MUP was cleared (this happens\n // when the loader was paused). `media` should be set at this point since one is always\n // set during `start()`.\n if (this.isMain_ && !this.minimumUpdatePeriodTimeout_) {\n // Trigger minimumUpdatePeriod to refresh the main manifest\n this.trigger('minimumUpdatePeriod'); // Since there was no prior minimumUpdatePeriodTimeout it should be recreated\n\n this.updateMinimumUpdatePeriodTimeout_();\n }\n this.trigger('mediaupdatetimeout');\n } else {\n this.trigger('loadedplaylist');\n }\n }\n start() {\n this.started = true; // We don't need to request the main manifest again\n // Call this asynchronously to match the xhr request behavior below\n\n if (!this.isMain_) {\n this.mediaRequest_ = window$1.setTimeout(() => this.haveMain_(), 0);\n return;\n }\n this.requestMain_((req, mainChanged) => {\n this.haveMain_();\n if (!this.hasPendingRequest() && !this.media_) {\n this.media(this.mainPlaylistLoader_.main.playlists[0]);\n }\n });\n }\n requestMain_(cb) {\n const metadata = {\n manifestInfo: {\n uri: this.mainPlaylistLoader_.srcUrl\n }\n };\n this.trigger({\n type: 'manifestrequeststart',\n metadata\n });\n this.request = this.vhs_.xhr({\n uri: this.mainPlaylistLoader_.srcUrl,\n withCredentials: this.withCredentials,\n requestType: 'dash-manifest'\n }, (error, req) => {\n if (error) {\n const {\n requestType\n } = req;\n error.metadata = getStreamingNetworkErrorMetadata({\n requestType,\n request: req,\n error\n });\n }\n if (this.requestErrored_(error, req)) {\n if (this.state === 'HAVE_NOTHING') {\n this.started = false;\n }\n return;\n }\n this.trigger({\n type: 'manifestrequestcomplete',\n metadata\n });\n const mainChanged = req.responseText !== this.mainPlaylistLoader_.mainXml_;\n this.mainPlaylistLoader_.mainXml_ = req.responseText;\n if (req.responseHeaders && req.responseHeaders.date) {\n this.mainLoaded_ = Date.parse(req.responseHeaders.date);\n } else {\n this.mainLoaded_ = Date.now();\n }\n this.mainPlaylistLoader_.srcUrl = resolveManifestRedirect(this.mainPlaylistLoader_.srcUrl, req);\n if (mainChanged) {\n this.handleMain_();\n this.syncClientServerClock_(() => {\n return cb(req, mainChanged);\n });\n return;\n }\n return cb(req, mainChanged);\n });\n }\n /**\n * Parses the main xml for UTCTiming node to sync the client clock to the server\n * clock. If the UTCTiming node requires a HEAD or GET request, that request is made.\n *\n * @param {Function} done\n * Function to call when clock sync has completed\n */\n\n syncClientServerClock_(done) {\n const utcTiming = parseUTCTiming(this.mainPlaylistLoader_.mainXml_); // No UTCTiming element found in the mpd. 
Use Date header from mpd request as the\n // server clock\n\n if (utcTiming === null) {\n this.mainPlaylistLoader_.clientOffset_ = this.mainLoaded_ - Date.now();\n return done();\n }\n if (utcTiming.method === 'DIRECT') {\n this.mainPlaylistLoader_.clientOffset_ = utcTiming.value - Date.now();\n return done();\n }\n this.request = this.vhs_.xhr({\n uri: resolveUrl(this.mainPlaylistLoader_.srcUrl, utcTiming.value),\n method: utcTiming.method,\n withCredentials: this.withCredentials,\n requestType: 'dash-clock-sync'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n }\n if (error) {\n const {\n requestType\n } = req;\n this.error.metadata = getStreamingNetworkErrorMetadata({\n requestType,\n request: req,\n error\n }); // sync request failed, fall back to using date header from mpd\n // TODO: log warning\n\n this.mainPlaylistLoader_.clientOffset_ = this.mainLoaded_ - Date.now();\n return done();\n }\n let serverTime;\n if (utcTiming.method === 'HEAD') {\n if (!req.responseHeaders || !req.responseHeaders.date) {\n // expected date header not preset, fall back to using date header from mpd\n // TODO: log warning\n serverTime = this.mainLoaded_;\n } else {\n serverTime = Date.parse(req.responseHeaders.date);\n }\n } else {\n serverTime = Date.parse(req.responseText);\n }\n this.mainPlaylistLoader_.clientOffset_ = serverTime - Date.now();\n done();\n });\n }\n haveMain_() {\n this.state = 'HAVE_MAIN_MANIFEST';\n if (this.isMain_) {\n // We have the main playlist at this point, so\n // trigger this to allow PlaylistController\n // to make an initial playlist selection\n this.trigger('loadedplaylist');\n } else if (!this.media_) {\n // no media playlist was specifically selected so select\n // the one the child playlist loader was created with\n this.media(this.childPlaylist_);\n }\n }\n handleMain_() {\n // clear media request\n this.mediaRequest_ = null;\n const oldMain = this.mainPlaylistLoader_.main;\n const metadata = {\n manifestInfo: {\n uri: this.mainPlaylistLoader_.srcUrl\n }\n };\n this.trigger({\n type: 'manifestparsestart',\n metadata\n });\n let newMain;\n try {\n newMain = parseMainXml({\n mainXml: this.mainPlaylistLoader_.mainXml_,\n srcUrl: this.mainPlaylistLoader_.srcUrl,\n clientOffset: this.mainPlaylistLoader_.clientOffset_,\n sidxMapping: this.mainPlaylistLoader_.sidxMapping_,\n previousManifest: oldMain\n });\n } catch (error) {\n this.error = error;\n this.error.metadata = {\n errorType: videojs.Error.StreamingDashManifestParserError,\n error\n };\n this.trigger('error');\n } // if we have an old main to compare the new main against\n\n if (oldMain) {\n newMain = updateMain(oldMain, newMain, this.mainPlaylistLoader_.sidxMapping_);\n } // only update main if we have a new main\n\n this.mainPlaylistLoader_.main = newMain ? 
newMain : oldMain;\n const location = this.mainPlaylistLoader_.main.locations && this.mainPlaylistLoader_.main.locations[0];\n if (location && location !== this.mainPlaylistLoader_.srcUrl) {\n this.mainPlaylistLoader_.srcUrl = location;\n }\n if (!oldMain || newMain && newMain.minimumUpdatePeriod !== oldMain.minimumUpdatePeriod) {\n this.updateMinimumUpdatePeriodTimeout_();\n }\n this.addEventStreamToMetadataTrack_(newMain);\n if (newMain) {\n const {\n duration,\n endList\n } = newMain;\n const renditions = [];\n newMain.playlists.forEach(playlist => {\n renditions.push({\n id: playlist.id,\n bandwidth: playlist.attributes.BANDWIDTH,\n resolution: playlist.attributes.RESOLUTION,\n codecs: playlist.attributes.CODECS\n });\n });\n const parsedManifest = {\n duration,\n isLive: !endList,\n renditions\n };\n metadata.parsedManifest = parsedManifest;\n this.trigger({\n type: 'manifestparsecomplete',\n metadata\n });\n }\n return Boolean(newMain);\n }\n updateMinimumUpdatePeriodTimeout_() {\n const mpl = this.mainPlaylistLoader_; // cancel any pending creation of mup on media\n // a new one will be added if needed.\n\n if (mpl.createMupOnMedia_) {\n mpl.off('loadedmetadata', mpl.createMupOnMedia_);\n mpl.createMupOnMedia_ = null;\n } // clear any pending timeouts\n\n if (mpl.minimumUpdatePeriodTimeout_) {\n window$1.clearTimeout(mpl.minimumUpdatePeriodTimeout_);\n mpl.minimumUpdatePeriodTimeout_ = null;\n }\n let mup = mpl.main && mpl.main.minimumUpdatePeriod; // If the minimumUpdatePeriod has a value of 0, that indicates that the current\n // MPD has no future validity, so a new one will need to be acquired when new\n // media segments are to be made available. Thus, we use the target duration\n // in this case\n\n if (mup === 0) {\n if (mpl.media()) {\n mup = mpl.media().targetDuration * 1000;\n } else {\n mpl.createMupOnMedia_ = mpl.updateMinimumUpdatePeriodTimeout_;\n mpl.one('loadedmetadata', mpl.createMupOnMedia_);\n }\n } // if minimumUpdatePeriod is invalid or <= zero, which\n // can happen when a live video becomes VOD. skip timeout\n // creation.\n\n if (typeof mup !== 'number' || mup <= 0) {\n if (mup < 0) {\n this.logger_(`found invalid minimumUpdatePeriod of ${mup}, not setting a timeout`);\n }\n return;\n }\n this.createMUPTimeout_(mup);\n }\n createMUPTimeout_(mup) {\n const mpl = this.mainPlaylistLoader_;\n mpl.minimumUpdatePeriodTimeout_ = window$1.setTimeout(() => {\n mpl.minimumUpdatePeriodTimeout_ = null;\n mpl.trigger('minimumUpdatePeriod');\n mpl.createMUPTimeout_(mup);\n }, mup);\n }\n /**\n * Sends request to refresh the main xml and updates the parsed main manifest\n */\n\n refreshXml_() {\n this.requestMain_((req, mainChanged) => {\n if (!mainChanged) {\n return;\n }\n if (this.media_) {\n this.media_ = this.mainPlaylistLoader_.main.playlists[this.media_.id];\n } // This will filter out updated sidx info from the mapping\n\n this.mainPlaylistLoader_.sidxMapping_ = filterChangedSidxMappings(this.mainPlaylistLoader_.main, this.mainPlaylistLoader_.sidxMapping_);\n this.addSidxSegments_(this.media(), this.state, sidxChanged => {\n // TODO: do we need to reload the current playlist?\n this.refreshMedia_(this.media().id);\n });\n });\n }\n /**\n * Refreshes the media playlist by re-parsing the main xml and updating playlist\n * references. 
If this is an alternate loader, the updated parsed manifest is retrieved\n * from the main loader.\n */\n\n refreshMedia_(mediaID) {\n if (!mediaID) {\n throw new Error('refreshMedia_ must take a media id');\n } // for main we have to reparse the main xml\n // to re-create segments based on current timing values\n // which may change media. We only skip updating the main manifest\n // if this is the first time this.media_ is being set.\n // as main was just parsed in that case.\n\n if (this.media_ && this.isMain_) {\n this.handleMain_();\n }\n const playlists = this.mainPlaylistLoader_.main.playlists;\n const mediaChanged = !this.media_ || this.media_ !== playlists[mediaID];\n if (mediaChanged) {\n this.media_ = playlists[mediaID];\n } else {\n this.trigger('playlistunchanged');\n }\n if (!this.mediaUpdateTimeout) {\n const createMediaUpdateTimeout = () => {\n if (this.media().endList) {\n return;\n }\n this.mediaUpdateTimeout = window$1.setTimeout(() => {\n this.trigger('mediaupdatetimeout');\n createMediaUpdateTimeout();\n }, refreshDelay(this.media(), Boolean(mediaChanged)));\n };\n createMediaUpdateTimeout();\n }\n this.trigger('loadedplaylist');\n }\n /**\n * Takes eventstream data from a parsed DASH manifest and adds it to the metadata text track.\n *\n * @param {manifest} newMain the newly parsed manifest\n */\n\n addEventStreamToMetadataTrack_(newMain) {\n // Only add new event stream metadata if we have a new manifest.\n if (newMain && this.mainPlaylistLoader_.main.eventStream) {\n // convert EventStream to ID3-like data.\n const metadataArray = this.mainPlaylistLoader_.main.eventStream.map(eventStreamNode => {\n return {\n cueTime: eventStreamNode.start,\n frames: [{\n data: eventStreamNode.messageData\n }]\n };\n });\n this.addMetadataToTextTrack('EventStream', metadataArray, this.mainPlaylistLoader_.main.duration);\n }\n }\n /**\n * Returns the key ID set from a playlist\n *\n * @param {playlist} playlist to fetch the key ID set from.\n * @return a Set of 32 digit hex strings that represent the unique keyIds for that playlist.\n */\n\n getKeyIdSet(playlist) {\n if (playlist.contentProtection) {\n const keyIds = new Set();\n for (const keysystem in playlist.contentProtection) {\n const defaultKID = playlist.contentProtection[keysystem].attributes['cenc:default_KID'];\n if (defaultKID) {\n // DASH keyIds are separated by dashes.\n keyIds.add(defaultKID.replace(/-/g, '').toLowerCase());\n }\n }\n return keyIds;\n }\n }\n}\nvar Config = {\n GOAL_BUFFER_LENGTH: 30,\n MAX_GOAL_BUFFER_LENGTH: 60,\n BACK_BUFFER_LENGTH: 30,\n GOAL_BUFFER_LENGTH_RATE: 1,\n // 0.5 MB/s\n INITIAL_BANDWIDTH: 4194304,\n // A fudge factor to apply to advertised playlist bitrates to account for\n // temporary flucations in client bandwidth\n BANDWIDTH_VARIANCE: 1.2,\n // How much of the buffer must be filled before we consider upswitching\n BUFFER_LOW_WATER_LINE: 0,\n MAX_BUFFER_LOW_WATER_LINE: 30,\n // TODO: Remove this when experimentalBufferBasedABR is removed\n EXPERIMENTAL_MAX_BUFFER_LOW_WATER_LINE: 16,\n BUFFER_LOW_WATER_LINE_RATE: 1,\n // If the buffer is greater than the high water line, we won't switch down\n BUFFER_HIGH_WATER_LINE: 30\n};\nconst stringToArrayBuffer = string => {\n const view = new Uint8Array(new ArrayBuffer(string.length));\n for (let i = 0; i < string.length; i++) {\n view[i] = string.charCodeAt(i);\n }\n return view.buffer;\n};\n\n/* global Blob, BlobBuilder, Worker */\n// unify worker interface\nconst browserWorkerPolyFill = function (workerObj) {\n // node only supports on/off\n 
workerObj.on = workerObj.addEventListener;\n workerObj.off = workerObj.removeEventListener;\n return workerObj;\n};\nconst createObjectURL = function (str) {\n try {\n return URL.createObjectURL(new Blob([str], {\n type: 'application/javascript'\n }));\n } catch (e) {\n const blob = new BlobBuilder();\n blob.append(str);\n return URL.createObjectURL(blob.getBlob());\n }\n};\nconst factory = function (code) {\n return function () {\n const objectUrl = createObjectURL(code);\n const worker = browserWorkerPolyFill(new Worker(objectUrl));\n worker.objURL = objectUrl;\n const terminate = worker.terminate;\n worker.on = worker.addEventListener;\n worker.off = worker.removeEventListener;\n worker.terminate = function () {\n URL.revokeObjectURL(objectUrl);\n return terminate.call(this);\n };\n return worker;\n };\n};\nconst transform = function (code) {\n return `var browserWorkerPolyFill = ${browserWorkerPolyFill.toString()};\\n` + 'browserWorkerPolyFill(self);\\n' + code;\n};\nconst getWorkerString = function (fn) {\n return fn.toString().replace(/^function.+?{/, '').slice(0, -1);\n};\n\n/* rollup-plugin-worker-factory start for worker!/home/runner/work/http-streaming/http-streaming/src/transmuxer-worker.js */\nconst workerCode$1 = transform(getWorkerString(function () {\n var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A lightweight readable stream implemention that handles event dispatching.\n * Objects that inherit from streams should call init in their constructors.\n */\n\n var Stream$8 = function () {\n this.init = function () {\n var listeners = {};\n /**\n * Add a listener for a specified event type.\n * @param type {string} the event name\n * @param listener {function} the callback to be invoked when an event of\n * the specified type occurs\n */\n\n this.on = function (type, listener) {\n if (!listeners[type]) {\n listeners[type] = [];\n }\n listeners[type] = listeners[type].concat(listener);\n };\n /**\n * Remove a listener for a specified event type.\n * @param type {string} the event name\n * @param listener {function} a function previously registered for this\n * type of event through `on`\n */\n\n this.off = function (type, listener) {\n var index;\n if (!listeners[type]) {\n return false;\n }\n index = listeners[type].indexOf(listener);\n listeners[type] = listeners[type].slice();\n listeners[type].splice(index, 1);\n return index > -1;\n };\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n * @param type {string} the event name\n */\n\n this.trigger = function (type) {\n var callbacks, i, length, args;\n callbacks = listeners[type];\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. 
Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n if (arguments.length === 2) {\n length = callbacks.length;\n for (i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n args = [];\n i = arguments.length;\n for (i = 1; i < arguments.length; ++i) {\n args.push(arguments[i]);\n }\n length = callbacks.length;\n for (i = 0; i < length; ++i) {\n callbacks[i].apply(this, args);\n }\n }\n };\n /**\n * Destroys the stream and cleans up.\n */\n\n this.dispose = function () {\n listeners = {};\n };\n };\n };\n /**\n * Forwards all `data` events on this stream to the destination stream. The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n * @param destination {stream} the stream that will receive all `data` events\n * @param autoFlush {boolean} if false, we will not call `flush` on the destination\n * when the current stream emits a 'done' event\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */\n\n Stream$8.prototype.pipe = function (destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n this.on('done', function (flushSource) {\n destination.flush(flushSource);\n });\n this.on('partialdone', function (flushSource) {\n destination.partialFlush(flushSource);\n });\n this.on('endedtimeline', function (flushSource) {\n destination.endTimeline(flushSource);\n });\n this.on('reset', function (flushSource) {\n destination.reset(flushSource);\n });\n return destination;\n }; // Default stream functions that are expected to be overridden to perform\n // actual work. These are provided by the prototype as a sort of no-op\n // implementation so that we don't have to check for their existence in the\n // `pipe` function above.\n\n Stream$8.prototype.push = function (data) {\n this.trigger('data', data);\n };\n Stream$8.prototype.flush = function (flushSource) {\n this.trigger('done', flushSource);\n };\n Stream$8.prototype.partialFlush = function (flushSource) {\n this.trigger('partialdone', flushSource);\n };\n Stream$8.prototype.endTimeline = function (flushSource) {\n this.trigger('endedtimeline', flushSource);\n };\n Stream$8.prototype.reset = function (flushSource) {\n this.trigger('reset', flushSource);\n };\n var stream = Stream$8;\n var MAX_UINT32$1 = Math.pow(2, 32);\n var getUint64$3 = function (uint8) {\n var dv = new DataView(uint8.buffer, uint8.byteOffset, uint8.byteLength);\n var value;\n if (dv.getBigUint64) {\n value = dv.getBigUint64(0);\n if (value < Number.MAX_SAFE_INTEGER) {\n return Number(value);\n }\n return value;\n }\n return dv.getUint32(0) * MAX_UINT32$1 + dv.getUint32(4);\n };\n var numbers = {\n getUint64: getUint64$3,\n MAX_UINT32: MAX_UINT32$1\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Functions that generate fragmented MP4s suitable for use with Media\n * Source Extensions.\n */\n\n var MAX_UINT32 = numbers.MAX_UINT32;\n var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd, trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex, trun$1, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR, AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS; // pre-calculate constants\n\n (function () {\n var i;\n types = {\n avc1: [],\n // codingname\n avcC: [],\n btrt: [],\n dinf: [],\n dref: [],\n esds: [],\n ftyp: [],\n hdlr: [],\n mdat: [],\n mdhd: [],\n 
mdia: [],\n mfhd: [],\n minf: [],\n moof: [],\n moov: [],\n mp4a: [],\n // codingname\n mvex: [],\n mvhd: [],\n pasp: [],\n sdtp: [],\n smhd: [],\n stbl: [],\n stco: [],\n stsc: [],\n stsd: [],\n stsz: [],\n stts: [],\n styp: [],\n tfdt: [],\n tfhd: [],\n traf: [],\n trak: [],\n trun: [],\n trex: [],\n tkhd: [],\n vmhd: []\n }; // In environments where Uint8Array is undefined (e.g., IE8), skip set up so that we\n // don't throw an error\n\n if (typeof Uint8Array === 'undefined') {\n return;\n }\n for (i in types) {\n if (types.hasOwnProperty(i)) {\n types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];\n }\n }\n MAJOR_BRAND = new Uint8Array(['i'.charCodeAt(0), 's'.charCodeAt(0), 'o'.charCodeAt(0), 'm'.charCodeAt(0)]);\n AVC1_BRAND = new Uint8Array(['a'.charCodeAt(0), 'v'.charCodeAt(0), 'c'.charCodeAt(0), '1'.charCodeAt(0)]);\n MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);\n VIDEO_HDLR = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0x76, 0x69, 0x64, 0x65,\n // handler_type: 'vide'\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'\n ]);\n AUDIO_HDLR = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0x73, 0x6f, 0x75, 0x6e,\n // handler_type: 'soun'\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'\n ]);\n HDLR_TYPES = {\n video: VIDEO_HDLR,\n audio: AUDIO_HDLR\n };\n DREF = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01,\n // entry_count\n 0x00, 0x00, 0x00, 0x0c,\n // entry_size\n 0x75, 0x72, 0x6c, 0x20,\n // 'url' type\n 0x00,\n // version 0\n 0x00, 0x00, 0x01 // entry_flags\n ]);\n SMHD = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00,\n // balance, 0 means centered\n 0x00, 0x00 // reserved\n ]);\n STCO = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00 // entry_count\n ]);\n STSC = STCO;\n STSZ = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // sample_size\n 0x00, 0x00, 0x00, 0x00 // sample_count\n ]);\n STTS = STCO;\n VMHD = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x01,\n // flags\n 0x00, 0x00,\n // graphicsmode\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor\n ]);\n })();\n box = function (type) {\n var payload = [],\n size = 0,\n i,\n result,\n view;\n for (i = 1; i < arguments.length; i++) {\n payload.push(arguments[i]);\n }\n i = payload.length; // calculate the total size we need to allocate\n\n while (i--) {\n size += payload[i].byteLength;\n }\n result = new Uint8Array(size + 8);\n view = new DataView(result.buffer, result.byteOffset, result.byteLength);\n view.setUint32(0, result.byteLength);\n result.set(type, 4); // copy the payload into the result\n\n for (i = 0, size = 8; i < payload.length; i++) {\n result.set(payload[i], size);\n size += payload[i].byteLength;\n }\n return result;\n };\n dinf = function () {\n return box(types.dinf, box(types.dref, DREF));\n };\n esds = function (track) {\n return box(types.esds, new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n // ES_Descriptor\n 
0x03,\n // tag, ES_DescrTag\n 0x19,\n // length\n 0x00, 0x00,\n // ES_ID\n 0x00,\n // streamDependenceFlag, URL_flag, reserved, streamPriority\n // DecoderConfigDescriptor\n 0x04,\n // tag, DecoderConfigDescrTag\n 0x11,\n // length\n 0x40,\n // object type\n 0x15,\n // streamType\n 0x00, 0x06, 0x00,\n // bufferSizeDB\n 0x00, 0x00, 0xda, 0xc0,\n // maxBitrate\n 0x00, 0x00, 0xda, 0xc0,\n // avgBitrate\n // DecoderSpecificInfo\n 0x05,\n // tag, DecoderSpecificInfoTag\n 0x02,\n // length\n // ISO/IEC 14496-3, AudioSpecificConfig\n // for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35\n track.audioobjecttype << 3 | track.samplingfrequencyindex >>> 1, track.samplingfrequencyindex << 7 | track.channelcount << 3, 0x06, 0x01, 0x02 // GASpecificConfig\n ]));\n };\n ftyp = function () {\n return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);\n };\n hdlr = function (type) {\n return box(types.hdlr, HDLR_TYPES[type]);\n };\n mdat = function (data) {\n return box(types.mdat, data);\n };\n mdhd = function (track) {\n var result = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x02,\n // creation_time\n 0x00, 0x00, 0x00, 0x03,\n // modification_time\n 0x00, 0x01, 0x5f, 0x90,\n // timescale, 90,000 \"ticks\" per second\n track.duration >>> 24 & 0xFF, track.duration >>> 16 & 0xFF, track.duration >>> 8 & 0xFF, track.duration & 0xFF,\n // duration\n 0x55, 0xc4,\n // 'und' language (undetermined)\n 0x00, 0x00]); // Use the sample rate from the track metadata, when it is\n // defined. The sample rate can be parsed out of an ADTS header, for\n // instance.\n\n if (track.samplerate) {\n result[12] = track.samplerate >>> 24 & 0xFF;\n result[13] = track.samplerate >>> 16 & 0xFF;\n result[14] = track.samplerate >>> 8 & 0xFF;\n result[15] = track.samplerate & 0xFF;\n }\n return box(types.mdhd, result);\n };\n mdia = function (track) {\n return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));\n };\n mfhd = function (sequenceNumber) {\n return box(types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00,\n // flags\n (sequenceNumber & 0xFF000000) >> 24, (sequenceNumber & 0xFF0000) >> 16, (sequenceNumber & 0xFF00) >> 8, sequenceNumber & 0xFF // sequence_number\n ]));\n };\n minf = function (track) {\n return box(types.minf, track.type === 'video' ? 
box(types.vmhd, VMHD) : box(types.smhd, SMHD), dinf(), stbl(track));\n };\n moof = function (sequenceNumber, tracks) {\n var trackFragments = [],\n i = tracks.length; // build traf boxes for each track fragment\n\n while (i--) {\n trackFragments[i] = traf(tracks[i]);\n }\n return box.apply(null, [types.moof, mfhd(sequenceNumber)].concat(trackFragments));\n };\n /**\n * Returns a movie box.\n * @param tracks {array} the tracks associated with this movie\n * @see ISO/IEC 14496-12:2012(E), section 8.2.1\n */\n\n moov = function (tracks) {\n var i = tracks.length,\n boxes = [];\n while (i--) {\n boxes[i] = trak(tracks[i]);\n }\n return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));\n };\n mvex = function (tracks) {\n var i = tracks.length,\n boxes = [];\n while (i--) {\n boxes[i] = trex(tracks[i]);\n }\n return box.apply(null, [types.mvex].concat(boxes));\n };\n mvhd = function (duration) {\n var bytes = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01,\n // creation_time\n 0x00, 0x00, 0x00, 0x02,\n // modification_time\n 0x00, 0x01, 0x5f, 0x90,\n // timescale, 90,000 \"ticks\" per second\n (duration & 0xFF000000) >> 24, (duration & 0xFF0000) >> 16, (duration & 0xFF00) >> 8, duration & 0xFF,\n // duration\n 0x00, 0x01, 0x00, 0x00,\n // 1.0 rate\n 0x01, 0x00,\n // 1.0 volume\n 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,\n // transformation: unity matrix\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0xff, 0xff, 0xff, 0xff // next_track_ID\n ]);\n return box(types.mvhd, bytes);\n };\n sdtp = function (track) {\n var samples = track.samples || [],\n bytes = new Uint8Array(4 + samples.length),\n flags,\n i; // leave the full box header (4 bytes) all zero\n // write the sample table\n\n for (i = 0; i < samples.length; i++) {\n flags = samples[i].flags;\n bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;\n }\n return box(types.sdtp, bytes);\n };\n stbl = function (track) {\n return box(types.stbl, stsd(track), box(types.stts, STTS), box(types.stsc, STSC), box(types.stsz, STSZ), box(types.stco, STCO));\n };\n (function () {\n var videoSample, audioSample;\n stsd = function (track) {\n return box(types.stsd, new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01]), track.type === 'video' ? 
videoSample(track) : audioSample(track));\n };\n videoSample = function (track) {\n var sps = track.sps || [],\n pps = track.pps || [],\n sequenceParameterSets = [],\n pictureParameterSets = [],\n i,\n avc1Box; // assemble the SPSs\n\n for (i = 0; i < sps.length; i++) {\n sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);\n sequenceParameterSets.push(sps[i].byteLength & 0xFF); // sequenceParameterSetLength\n\n sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS\n } // assemble the PPSs\n\n for (i = 0; i < pps.length; i++) {\n pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);\n pictureParameterSets.push(pps[i].byteLength & 0xFF);\n pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));\n }\n avc1Box = [types.avc1, new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // data_reference_index\n 0x00, 0x00,\n // pre_defined\n 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n (track.width & 0xff00) >> 8, track.width & 0xff,\n // width\n (track.height & 0xff00) >> 8, track.height & 0xff,\n // height\n 0x00, 0x48, 0x00, 0x00,\n // horizresolution\n 0x00, 0x48, 0x00, 0x00,\n // vertresolution\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // frame_count\n 0x13, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6a, 0x73, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x69, 0x62, 0x2d, 0x68, 0x6c, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // compressorname\n 0x00, 0x18,\n // depth = 24\n 0x11, 0x11 // pre_defined = -1\n ]), box(types.avcC, new Uint8Array([0x01,\n // configurationVersion\n track.profileIdc,\n // AVCProfileIndication\n track.profileCompatibility,\n // profile_compatibility\n track.levelIdc,\n // AVCLevelIndication\n 0xff // lengthSizeMinusOne, hard-coded to 4 bytes\n ].concat([sps.length],\n // numOfSequenceParameterSets\n sequenceParameterSets,\n // \"SPS\"\n [pps.length],\n // numOfPictureParameterSets\n pictureParameterSets // \"PPS\"\n ))), box(types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80,\n // bufferSizeDB\n 0x00, 0x2d, 0xc6, 0xc0,\n // maxBitrate\n 0x00, 0x2d, 0xc6, 0xc0 // avgBitrate\n ]))];\n if (track.sarRatio) {\n var hSpacing = track.sarRatio[0],\n vSpacing = track.sarRatio[1];\n avc1Box.push(box(types.pasp, new Uint8Array([(hSpacing & 0xFF000000) >> 24, (hSpacing & 0xFF0000) >> 16, (hSpacing & 0xFF00) >> 8, hSpacing & 0xFF, (vSpacing & 0xFF000000) >> 24, (vSpacing & 0xFF0000) >> 16, (vSpacing & 0xFF00) >> 8, vSpacing & 0xFF])));\n }\n return box.apply(null, avc1Box);\n };\n audioSample = function (track) {\n return box(types.mp4a, new Uint8Array([\n // SampleEntry, ISO/IEC 14496-12\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // data_reference_index\n // AudioSampleEntry, ISO/IEC 14496-12\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n (track.channelcount & 0xff00) >> 8, track.channelcount & 0xff,\n // channelcount\n (track.samplesize & 0xff00) >> 8, track.samplesize & 0xff,\n // samplesize\n 0x00, 0x00,\n // pre_defined\n 0x00, 0x00,\n // reserved\n (track.samplerate & 0xff00) >> 8, track.samplerate & 0xff, 0x00, 0x00 // samplerate, 16.16\n // MP4AudioSampleEntry, ISO/IEC 14496-14\n ]), esds(track));\n };\n })();\n tkhd = function (track) {\n var result = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x07,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // creation_time\n 0x00, 0x00, 0x00, 0x00,\n 
// modification_time\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF,\n // track_ID\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n (track.duration & 0xFF000000) >> 24, (track.duration & 0xFF0000) >> 16, (track.duration & 0xFF00) >> 8, track.duration & 0xFF,\n // duration\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00,\n // layer\n 0x00, 0x00,\n // alternate_group\n 0x01, 0x00,\n // non-audio track volume\n 0x00, 0x00,\n // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,\n // transformation: unity matrix\n (track.width & 0xFF00) >> 8, track.width & 0xFF, 0x00, 0x00,\n // width\n (track.height & 0xFF00) >> 8, track.height & 0xFF, 0x00, 0x00 // height\n ]);\n return box(types.tkhd, result);\n };\n /**\n * Generate a track fragment (traf) box. A traf box collects metadata\n * about tracks in a movie fragment (moof) box.\n */\n\n traf = function (track) {\n var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable, dataOffset, upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;\n trackFragmentHeader = box(types.tfhd, new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x3a,\n // flags\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF,\n // track_ID\n 0x00, 0x00, 0x00, 0x01,\n // sample_description_index\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_duration\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_size\n 0x00, 0x00, 0x00, 0x00 // default_sample_flags\n ]));\n upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / MAX_UINT32);\n lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % MAX_UINT32);\n trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([0x01,\n // version 1\n 0x00, 0x00, 0x00,\n // flags\n // baseMediaDecodeTime\n upperWordBaseMediaDecodeTime >>> 24 & 0xFF, upperWordBaseMediaDecodeTime >>> 16 & 0xFF, upperWordBaseMediaDecodeTime >>> 8 & 0xFF, upperWordBaseMediaDecodeTime & 0xFF, lowerWordBaseMediaDecodeTime >>> 24 & 0xFF, lowerWordBaseMediaDecodeTime >>> 16 & 0xFF, lowerWordBaseMediaDecodeTime >>> 8 & 0xFF, lowerWordBaseMediaDecodeTime & 0xFF])); // the data offset specifies the number of bytes from the start of\n // the containing moof to the first payload byte of the associated\n // mdat\n\n dataOffset = 32 +\n // tfhd\n 20 +\n // tfdt\n 8 +\n // traf header\n 16 +\n // mfhd\n 8 +\n // moof header\n 8; // mdat header\n // audio tracks require less metadata\n\n if (track.type === 'audio') {\n trackFragmentRun = trun$1(track, dataOffset);\n return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun);\n } // video tracks should contain an independent and disposable samples\n // box (sdtp)\n // generate one and adjust offsets to match\n\n sampleDependencyTable = sdtp(track);\n trackFragmentRun = trun$1(track, sampleDependencyTable.length + dataOffset);\n return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable);\n };\n /**\n * Generate a track box.\n * @param track {object} a track definition\n * @return {Uint8Array} the track box\n */\n\n trak = function (track) {\n track.duration = track.duration || 0xffffffff;\n return box(types.trak, tkhd(track), mdia(track));\n };\n trex = function (track) {\n var result = new 
Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF,\n // track_ID\n 0x00, 0x00, 0x00, 0x01,\n // default_sample_description_index\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_duration\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_size\n 0x00, 0x01, 0x00, 0x01 // default_sample_flags\n ]); // the last two bytes of default_sample_flags is the sample\n // degradation priority, a hint about the importance of this sample\n // relative to others. Lower the degradation priority for all sample\n // types other than video.\n\n if (track.type !== 'video') {\n result[result.length - 1] = 0x00;\n }\n return box(types.trex, result);\n };\n (function () {\n var audioTrun, videoTrun, trunHeader; // This method assumes all samples are uniform. That is, if a\n // duration is present for the first sample, it will be present for\n // all subsequent samples.\n // see ISO/IEC 14496-12:2012, Section 8.8.8.1\n\n trunHeader = function (samples, offset) {\n var durationPresent = 0,\n sizePresent = 0,\n flagsPresent = 0,\n compositionTimeOffset = 0; // trun flag constants\n\n if (samples.length) {\n if (samples[0].duration !== undefined) {\n durationPresent = 0x1;\n }\n if (samples[0].size !== undefined) {\n sizePresent = 0x2;\n }\n if (samples[0].flags !== undefined) {\n flagsPresent = 0x4;\n }\n if (samples[0].compositionTimeOffset !== undefined) {\n compositionTimeOffset = 0x8;\n }\n }\n return [0x00,\n // version 0\n 0x00, durationPresent | sizePresent | flagsPresent | compositionTimeOffset, 0x01,\n // flags\n (samples.length & 0xFF000000) >>> 24, (samples.length & 0xFF0000) >>> 16, (samples.length & 0xFF00) >>> 8, samples.length & 0xFF,\n // sample_count\n (offset & 0xFF000000) >>> 24, (offset & 0xFF0000) >>> 16, (offset & 0xFF00) >>> 8, offset & 0xFF // data_offset\n ];\n };\n videoTrun = function (track, offset) {\n var bytesOffest, bytes, header, samples, sample, i;\n samples = track.samples || [];\n offset += 8 + 12 + 16 * samples.length;\n header = trunHeader(samples, offset);\n bytes = new Uint8Array(header.length + samples.length * 16);\n bytes.set(header);\n bytesOffest = header.length;\n for (i = 0; i < samples.length; i++) {\n sample = samples[i];\n bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration\n\n bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.size & 0xFF; // sample_size\n\n bytes[bytesOffest++] = sample.flags.isLeading << 2 | sample.flags.dependsOn;\n bytes[bytesOffest++] = sample.flags.isDependedOn << 6 | sample.flags.hasRedundancy << 4 | sample.flags.paddingValue << 1 | sample.flags.isNonSyncSample;\n bytes[bytesOffest++] = sample.flags.degradationPriority & 0xF0 << 8;\n bytes[bytesOffest++] = sample.flags.degradationPriority & 0x0F; // sample_flags\n\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.compositionTimeOffset & 0xFF; // sample_composition_time_offset\n }\n return box(types.trun, bytes);\n };\n audioTrun = 
function (track, offset) {\n var bytes, bytesOffest, header, samples, sample, i;\n samples = track.samples || [];\n offset += 8 + 12 + 8 * samples.length;\n header = trunHeader(samples, offset);\n bytes = new Uint8Array(header.length + samples.length * 8);\n bytes.set(header);\n bytesOffest = header.length;\n for (i = 0; i < samples.length; i++) {\n sample = samples[i];\n bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration\n\n bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.size & 0xFF; // sample_size\n }\n return box(types.trun, bytes);\n };\n trun$1 = function (track, offset) {\n if (track.type === 'audio') {\n return audioTrun(track, offset);\n }\n return videoTrun(track, offset);\n };\n })();\n var mp4Generator = {\n ftyp: ftyp,\n mdat: mdat,\n moof: moof,\n moov: moov,\n initSegment: function (tracks) {\n var fileType = ftyp(),\n movie = moov(tracks),\n result;\n result = new Uint8Array(fileType.byteLength + movie.byteLength);\n result.set(fileType);\n result.set(movie, fileType.byteLength);\n return result;\n }\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n // composed of the nal units that make up that frame\n // Also keep track of cummulative data about the frame from the nal units such\n // as the frame duration, starting pts, etc.\n\n var groupNalsIntoFrames = function (nalUnits) {\n var i,\n currentNal,\n currentFrame = [],\n frames = []; // TODO added for LHLS, make sure this is OK\n\n frames.byteLength = 0;\n frames.nalCount = 0;\n frames.duration = 0;\n currentFrame.byteLength = 0;\n for (i = 0; i < nalUnits.length; i++) {\n currentNal = nalUnits[i]; // Split on 'aud'-type nal units\n\n if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {\n // Since the very first nal unit is expected to be an AUD\n // only push to the frames array when currentFrame is not empty\n if (currentFrame.length) {\n currentFrame.duration = currentNal.dts - currentFrame.dts; // TODO added for LHLS, make sure this is OK\n\n frames.byteLength += currentFrame.byteLength;\n frames.nalCount += currentFrame.length;\n frames.duration += currentFrame.duration;\n frames.push(currentFrame);\n }\n currentFrame = [currentNal];\n currentFrame.byteLength = currentNal.data.byteLength;\n currentFrame.pts = currentNal.pts;\n currentFrame.dts = currentNal.dts;\n } else {\n // Specifically flag key frames for ease of use later\n if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {\n currentFrame.keyFrame = true;\n }\n currentFrame.duration = currentNal.dts - currentFrame.dts;\n currentFrame.byteLength += currentNal.data.byteLength;\n currentFrame.push(currentNal);\n }\n } // For the last frame, use the duration of the previous frame if we\n // have nothing better to go on\n\n if (frames.length && (!currentFrame.duration || currentFrame.duration <= 0)) {\n currentFrame.duration = frames[frames.length - 1].duration;\n } // Push the final frame\n // TODO added for LHLS, make sure this is OK\n\n frames.byteLength += currentFrame.byteLength;\n frames.nalCount += currentFrame.length;\n frames.duration += currentFrame.duration;\n 
frames.push(currentFrame);\n return frames;\n }; // Convert an array of frames into an array of Gop with each Gop being composed\n // of the frames that make up that Gop\n // Also keep track of cummulative data about the Gop from the frames such as the\n // Gop duration, starting pts, etc.\n\n var groupFramesIntoGops = function (frames) {\n var i,\n currentFrame,\n currentGop = [],\n gops = []; // We must pre-set some of the values on the Gop since we\n // keep running totals of these values\n\n currentGop.byteLength = 0;\n currentGop.nalCount = 0;\n currentGop.duration = 0;\n currentGop.pts = frames[0].pts;\n currentGop.dts = frames[0].dts; // store some metadata about all the Gops\n\n gops.byteLength = 0;\n gops.nalCount = 0;\n gops.duration = 0;\n gops.pts = frames[0].pts;\n gops.dts = frames[0].dts;\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n if (currentFrame.keyFrame) {\n // Since the very first frame is expected to be an keyframe\n // only push to the gops array when currentGop is not empty\n if (currentGop.length) {\n gops.push(currentGop);\n gops.byteLength += currentGop.byteLength;\n gops.nalCount += currentGop.nalCount;\n gops.duration += currentGop.duration;\n }\n currentGop = [currentFrame];\n currentGop.nalCount = currentFrame.length;\n currentGop.byteLength = currentFrame.byteLength;\n currentGop.pts = currentFrame.pts;\n currentGop.dts = currentFrame.dts;\n currentGop.duration = currentFrame.duration;\n } else {\n currentGop.duration += currentFrame.duration;\n currentGop.nalCount += currentFrame.length;\n currentGop.byteLength += currentFrame.byteLength;\n currentGop.push(currentFrame);\n }\n }\n if (gops.length && currentGop.duration <= 0) {\n currentGop.duration = gops[gops.length - 1].duration;\n }\n gops.byteLength += currentGop.byteLength;\n gops.nalCount += currentGop.nalCount;\n gops.duration += currentGop.duration; // push the final Gop\n\n gops.push(currentGop);\n return gops;\n };\n /*\n * Search for the first keyframe in the GOPs and throw away all frames\n * until that keyframe. 
Then extend the duration of the pulled keyframe\n * and pull the PTS and DTS of the keyframe so that it covers the time\n * range of the frames that were disposed.\n *\n * @param {Array} gops video GOPs\n * @returns {Array} modified video GOPs\n */\n\n var extendFirstKeyFrame = function (gops) {\n var currentGop;\n if (!gops[0][0].keyFrame && gops.length > 1) {\n // Remove the first GOP\n currentGop = gops.shift();\n gops.byteLength -= currentGop.byteLength;\n gops.nalCount -= currentGop.nalCount; // Extend the first frame of what is now the\n // first gop to cover the time period of the\n // frames we just removed\n\n gops[0][0].dts = currentGop.dts;\n gops[0][0].pts = currentGop.pts;\n gops[0][0].duration += currentGop.duration;\n }\n return gops;\n };\n /**\n * Default sample object\n * see ISO/IEC 14496-12:2012, section 8.6.4.3\n */\n\n var createDefaultSample = function () {\n return {\n size: 0,\n flags: {\n isLeading: 0,\n dependsOn: 1,\n isDependedOn: 0,\n hasRedundancy: 0,\n degradationPriority: 0,\n isNonSyncSample: 1\n }\n };\n };\n /*\n * Collates information from a video frame into an object for eventual\n * entry into an MP4 sample table.\n *\n * @param {Object} frame the video frame\n * @param {Number} dataOffset the byte offset to position the sample\n * @return {Object} object containing sample table info for a frame\n */\n\n var sampleForFrame = function (frame, dataOffset) {\n var sample = createDefaultSample();\n sample.dataOffset = dataOffset;\n sample.compositionTimeOffset = frame.pts - frame.dts;\n sample.duration = frame.duration;\n sample.size = 4 * frame.length; // Space for nal unit size\n\n sample.size += frame.byteLength;\n if (frame.keyFrame) {\n sample.flags.dependsOn = 2;\n sample.flags.isNonSyncSample = 0;\n }\n return sample;\n }; // generate the track's sample table from an array of gops\n\n var generateSampleTable$1 = function (gops, baseDataOffset) {\n var h,\n i,\n sample,\n currentGop,\n currentFrame,\n dataOffset = baseDataOffset || 0,\n samples = [];\n for (h = 0; h < gops.length; h++) {\n currentGop = gops[h];\n for (i = 0; i < currentGop.length; i++) {\n currentFrame = currentGop[i];\n sample = sampleForFrame(currentFrame, dataOffset);\n dataOffset += sample.size;\n samples.push(sample);\n }\n }\n return samples;\n }; // generate the track's raw mdat data from an array of gops\n\n var concatenateNalData = function (gops) {\n var h,\n i,\n j,\n currentGop,\n currentFrame,\n currentNal,\n dataOffset = 0,\n nalsByteLength = gops.byteLength,\n numberOfNals = gops.nalCount,\n totalByteLength = nalsByteLength + 4 * numberOfNals,\n data = new Uint8Array(totalByteLength),\n view = new DataView(data.buffer); // For each Gop..\n\n for (h = 0; h < gops.length; h++) {\n currentGop = gops[h]; // For each Frame..\n\n for (i = 0; i < currentGop.length; i++) {\n currentFrame = currentGop[i]; // For each NAL..\n\n for (j = 0; j < currentFrame.length; j++) {\n currentNal = currentFrame[j];\n view.setUint32(dataOffset, currentNal.data.byteLength);\n dataOffset += 4;\n data.set(currentNal.data, dataOffset);\n dataOffset += currentNal.data.byteLength;\n }\n }\n }\n return data;\n }; // generate the track's sample table from a frame\n\n var generateSampleTableForFrame = function (frame, baseDataOffset) {\n var sample,\n dataOffset = baseDataOffset || 0,\n samples = [];\n sample = sampleForFrame(frame, dataOffset);\n samples.push(sample);\n return samples;\n }; // generate the track's raw mdat data from a frame\n\n var concatenateNalDataForFrame = function (frame) {\n 
var i,\n currentNal,\n dataOffset = 0,\n nalsByteLength = frame.byteLength,\n numberOfNals = frame.length,\n totalByteLength = nalsByteLength + 4 * numberOfNals,\n data = new Uint8Array(totalByteLength),\n view = new DataView(data.buffer); // For each NAL..\n\n for (i = 0; i < frame.length; i++) {\n currentNal = frame[i];\n view.setUint32(dataOffset, currentNal.data.byteLength);\n dataOffset += 4;\n data.set(currentNal.data, dataOffset);\n dataOffset += currentNal.data.byteLength;\n }\n return data;\n };\n var frameUtils$1 = {\n groupNalsIntoFrames: groupNalsIntoFrames,\n groupFramesIntoGops: groupFramesIntoGops,\n extendFirstKeyFrame: extendFirstKeyFrame,\n generateSampleTable: generateSampleTable$1,\n concatenateNalData: concatenateNalData,\n generateSampleTableForFrame: generateSampleTableForFrame,\n concatenateNalDataForFrame: concatenateNalDataForFrame\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var highPrefix = [33, 16, 5, 32, 164, 27];\n var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];\n var zeroFill = function (count) {\n var a = [];\n while (count--) {\n a.push(0);\n }\n return a;\n };\n var makeTable = function (metaTable) {\n return Object.keys(metaTable).reduce(function (obj, key) {\n obj[key] = new Uint8Array(metaTable[key].reduce(function (arr, part) {\n return arr.concat(part);\n }, []));\n return obj;\n }, {});\n };\n var silence;\n var silence_1 = function () {\n if (!silence) {\n // Frames-of-silence to use for filling in missing AAC frames\n var coneOfSilence = {\n 96000: [highPrefix, [227, 64], zeroFill(154), [56]],\n 88200: [highPrefix, [231], zeroFill(170), [56]],\n 64000: [highPrefix, [248, 192], zeroFill(240), [56]],\n 48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],\n 44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],\n 32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],\n 24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],\n 16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],\n 12000: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],\n 11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],\n 8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]\n };\n silence = makeTable(coneOfSilence);\n }\n return silence;\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ONE_SECOND_IN_TS$4 = 90000,\n // 90kHz clock\n secondsToVideoTs,\n secondsToAudioTs,\n videoTsToSeconds,\n audioTsToSeconds,\n audioTsToVideoTs,\n videoTsToAudioTs,\n metadataTsToSeconds;\n secondsToVideoTs = function (seconds) {\n return seconds * ONE_SECOND_IN_TS$4;\n };\n secondsToAudioTs = function (seconds, sampleRate) {\n return seconds * sampleRate;\n };\n videoTsToSeconds = function (timestamp) {\n return timestamp / ONE_SECOND_IN_TS$4;\n };\n audioTsToSeconds = function (timestamp, sampleRate) {\n return timestamp / sampleRate;\n };\n audioTsToVideoTs = function (timestamp, sampleRate) 
{\n return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate));\n };\n videoTsToAudioTs = function (timestamp, sampleRate) {\n return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate);\n };\n /**\n * Adjust ID3 tag or caption timing information by the timeline pts values\n * (if keepOriginalTimestamps is false) and convert to seconds\n */\n\n metadataTsToSeconds = function (timestamp, timelineStartPts, keepOriginalTimestamps) {\n return videoTsToSeconds(keepOriginalTimestamps ? timestamp : timestamp - timelineStartPts);\n };\n var clock$2 = {\n ONE_SECOND_IN_TS: ONE_SECOND_IN_TS$4,\n secondsToVideoTs: secondsToVideoTs,\n secondsToAudioTs: secondsToAudioTs,\n videoTsToSeconds: videoTsToSeconds,\n audioTsToSeconds: audioTsToSeconds,\n audioTsToVideoTs: audioTsToVideoTs,\n videoTsToAudioTs: videoTsToAudioTs,\n metadataTsToSeconds: metadataTsToSeconds\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var coneOfSilence = silence_1;\n var clock$1 = clock$2;\n /**\n * Sum the `byteLength` properties of the data in each AAC frame\n */\n\n var sumFrameByteLengths = function (array) {\n var i,\n currentObj,\n sum = 0; // sum the byteLength's all each nal unit in the frame\n\n for (i = 0; i < array.length; i++) {\n currentObj = array[i];\n sum += currentObj.data.byteLength;\n }\n return sum;\n }; // Possibly pad (prefix) the audio track with silence if appending this track\n // would lead to the introduction of a gap in the audio buffer\n\n var prefixWithSilence = function (track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {\n var baseMediaDecodeTimeTs,\n frameDuration = 0,\n audioGapDuration = 0,\n audioFillFrameCount = 0,\n audioFillDuration = 0,\n silentFrame,\n i,\n firstFrame;\n if (!frames.length) {\n return;\n }\n baseMediaDecodeTimeTs = clock$1.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate); // determine frame clock duration based on sample rate, round up to avoid overfills\n\n frameDuration = Math.ceil(clock$1.ONE_SECOND_IN_TS / (track.samplerate / 1024));\n if (audioAppendStartTs && videoBaseMediaDecodeTime) {\n // insert the shortest possible amount (audio gap or audio to video gap)\n audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime); // number of full frames in the audio gap\n\n audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);\n audioFillDuration = audioFillFrameCount * frameDuration;\n } // don't attempt to fill gaps smaller than a single frame or larger\n // than a half second\n\n if (audioFillFrameCount < 1 || audioFillDuration > clock$1.ONE_SECOND_IN_TS / 2) {\n return;\n }\n silentFrame = coneOfSilence()[track.samplerate];\n if (!silentFrame) {\n // we don't have a silent frame pregenerated for the sample rate, so use a frame\n // from the content instead\n silentFrame = frames[0].data;\n }\n for (i = 0; i < audioFillFrameCount; i++) {\n firstFrame = frames[0];\n frames.splice(0, 0, {\n data: silentFrame,\n dts: firstFrame.dts - frameDuration,\n pts: firstFrame.pts - frameDuration\n });\n }\n track.baseMediaDecodeTime -= Math.floor(clock$1.videoTsToAudioTs(audioFillDuration, track.samplerate));\n return audioFillDuration;\n }; // If the audio segment extends before the earliest allowed dts\n // value, remove AAC frames until starts at or after the earliest\n // allowed DTS so that we don't end up with a negative baseMedia-\n // DecodeTime for the audio track\n\n var 
trimAdtsFramesByEarliestDts = function (adtsFrames, track, earliestAllowedDts) {\n if (track.minSegmentDts >= earliestAllowedDts) {\n return adtsFrames;\n } // We will need to recalculate the earliest segment Dts\n\n track.minSegmentDts = Infinity;\n return adtsFrames.filter(function (currentFrame) {\n // If this is an allowed frame, keep it and record it's Dts\n if (currentFrame.dts >= earliestAllowedDts) {\n track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts);\n track.minSegmentPts = track.minSegmentDts;\n return true;\n } // Otherwise, discard it\n\n return false;\n });\n }; // generate the track's raw mdat data from an array of frames\n\n var generateSampleTable = function (frames) {\n var i,\n currentFrame,\n samples = [];\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n samples.push({\n size: currentFrame.data.byteLength,\n duration: 1024 // For AAC audio, all samples contain 1024 samples\n });\n }\n return samples;\n }; // generate the track's sample table from an array of frames\n\n var concatenateFrameData = function (frames) {\n var i,\n currentFrame,\n dataOffset = 0,\n data = new Uint8Array(sumFrameByteLengths(frames));\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n data.set(currentFrame.data, dataOffset);\n dataOffset += currentFrame.data.byteLength;\n }\n return data;\n };\n var audioFrameUtils$1 = {\n prefixWithSilence: prefixWithSilence,\n trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,\n generateSampleTable: generateSampleTable,\n concatenateFrameData: concatenateFrameData\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ONE_SECOND_IN_TS$3 = clock$2.ONE_SECOND_IN_TS;\n /**\n * Store information about the start and end of the track and the\n * duration for each frame/sample we process in order to calculate\n * the baseMediaDecodeTime\n */\n\n var collectDtsInfo = function (track, data) {\n if (typeof data.pts === 'number') {\n if (track.timelineStartInfo.pts === undefined) {\n track.timelineStartInfo.pts = data.pts;\n }\n if (track.minSegmentPts === undefined) {\n track.minSegmentPts = data.pts;\n } else {\n track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);\n }\n if (track.maxSegmentPts === undefined) {\n track.maxSegmentPts = data.pts;\n } else {\n track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);\n }\n }\n if (typeof data.dts === 'number') {\n if (track.timelineStartInfo.dts === undefined) {\n track.timelineStartInfo.dts = data.dts;\n }\n if (track.minSegmentDts === undefined) {\n track.minSegmentDts = data.dts;\n } else {\n track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);\n }\n if (track.maxSegmentDts === undefined) {\n track.maxSegmentDts = data.dts;\n } else {\n track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);\n }\n }\n };\n /**\n * Clear values used to calculate the baseMediaDecodeTime between\n * tracks\n */\n\n var clearDtsInfo = function (track) {\n delete track.minSegmentDts;\n delete track.maxSegmentDts;\n delete track.minSegmentPts;\n delete track.maxSegmentPts;\n };\n /**\n * Calculate the track's baseMediaDecodeTime based on the earliest\n * DTS the transmuxer has ever seen and the minimum DTS for the\n * current track\n * @param track {object} track metadata configuration\n * @param keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n var 
calculateTrackBaseMediaDecodeTime = function (track, keepOriginalTimestamps) {\n var baseMediaDecodeTime,\n scale,\n minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.\n\n if (!keepOriginalTimestamps) {\n minSegmentDts -= track.timelineStartInfo.dts;\n } // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where\n // we want the start of the first segment to be placed\n\n baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first\n\n baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative\n\n baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);\n if (track.type === 'audio') {\n // Audio has a different clock equal to the sampling_rate so we need to\n // scale the PTS values into the clock rate of the track\n scale = track.samplerate / ONE_SECOND_IN_TS$3;\n baseMediaDecodeTime *= scale;\n baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);\n }\n return baseMediaDecodeTime;\n };\n var trackDecodeInfo$1 = {\n clearDtsInfo: clearDtsInfo,\n calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,\n collectDtsInfo: collectDtsInfo\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Reads in-band caption information from a video elementary\n * stream. Captions must follow the CEA-708 standard for injection\n * into an MPEG-2 transport streams.\n * @see https://en.wikipedia.org/wiki/CEA-708\n * @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf\n */\n // payload type field to indicate how they are to be\n // interpreted. CEAS-708 caption content is always transmitted with\n // payload type 0x04.\n\n var USER_DATA_REGISTERED_ITU_T_T35 = 4,\n RBSP_TRAILING_BITS = 128;\n /**\n * Parse a supplemental enhancement information (SEI) NAL unit.\n * Stops parsing once a message of type ITU T T35 has been found.\n *\n * @param bytes {Uint8Array} the bytes of a SEI NAL unit\n * @return {object} the parsed SEI payload\n * @see Rec. 
ITU-T H.264, 7.3.2.3.1\n */\n\n var parseSei = function (bytes) {\n var i = 0,\n result = {\n payloadType: -1,\n payloadSize: 0\n },\n payloadType = 0,\n payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message\n\n while (i < bytes.byteLength) {\n // stop once we have hit the end of the sei_rbsp\n if (bytes[i] === RBSP_TRAILING_BITS) {\n break;\n } // Parse payload type\n\n while (bytes[i] === 0xFF) {\n payloadType += 255;\n i++;\n }\n payloadType += bytes[i++]; // Parse payload size\n\n while (bytes[i] === 0xFF) {\n payloadSize += 255;\n i++;\n }\n payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break\n // there can only ever be one caption message in a frame's sei\n\n if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {\n var userIdentifier = String.fromCharCode(bytes[i + 3], bytes[i + 4], bytes[i + 5], bytes[i + 6]);\n if (userIdentifier === 'GA94') {\n result.payloadType = payloadType;\n result.payloadSize = payloadSize;\n result.payload = bytes.subarray(i, i + payloadSize);\n break;\n } else {\n result.payload = void 0;\n }\n } // skip the payload and parse the next message\n\n i += payloadSize;\n payloadType = 0;\n payloadSize = 0;\n }\n return result;\n }; // see ANSI/SCTE 128-1 (2013), section 8.1\n\n var parseUserData = function (sei) {\n // itu_t_t35_contry_code must be 181 (United States) for\n // captions\n if (sei.payload[0] !== 181) {\n return null;\n } // itu_t_t35_provider_code should be 49 (ATSC) for captions\n\n if ((sei.payload[1] << 8 | sei.payload[2]) !== 49) {\n return null;\n } // the user_identifier should be \"GA94\" to indicate ATSC1 data\n\n if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') {\n return null;\n } // finally, user_data_type_code should be 0x03 for caption data\n\n if (sei.payload[7] !== 0x03) {\n return null;\n } // return the user_data_type_structure and strip the trailing\n // marker bits\n\n return sei.payload.subarray(8, sei.payload.length - 1);\n }; // see CEA-708-D, section 4.4\n\n var parseCaptionPackets = function (pts, userData) {\n var results = [],\n i,\n count,\n offset,\n data; // if this is just filler, return immediately\n\n if (!(userData[0] & 0x40)) {\n return results;\n } // parse out the cc_data_1 and cc_data_2 fields\n\n count = userData[0] & 0x1f;\n for (i = 0; i < count; i++) {\n offset = i * 3;\n data = {\n type: userData[offset + 2] & 0x03,\n pts: pts\n }; // capture cc data when cc_valid is 1\n\n if (userData[offset + 2] & 0x04) {\n data.ccData = userData[offset + 3] << 8 | userData[offset + 4];\n results.push(data);\n }\n }\n return results;\n };\n var discardEmulationPreventionBytes$1 = function (data) {\n var length = data.byteLength,\n emulationPreventionBytesPositions = [],\n i = 1,\n newLength,\n newData; // Find all `Emulation Prevention Bytes`\n\n while (i < length - 2) {\n if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {\n emulationPreventionBytesPositions.push(i + 2);\n i += 2;\n } else {\n i++;\n }\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n if (emulationPreventionBytesPositions.length === 0) {\n return data;\n } // Create a new array to hold the NAL unit data\n\n newLength = length - emulationPreventionBytesPositions.length;\n newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === emulationPreventionBytesPositions[0]) {\n // Skip this byte\n 
sourceIndex++; // Remove this position index\n\n emulationPreventionBytesPositions.shift();\n }\n newData[i] = data[sourceIndex];\n }\n return newData;\n }; // exports\n\n var captionPacketParser = {\n parseSei: parseSei,\n parseUserData: parseUserData,\n parseCaptionPackets: parseCaptionPackets,\n discardEmulationPreventionBytes: discardEmulationPreventionBytes$1,\n USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Reads in-band caption information from a video elementary\n * stream. Captions must follow the CEA-708 standard for injection\n * into an MPEG-2 transport streams.\n * @see https://en.wikipedia.org/wiki/CEA-708\n * @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf\n */\n // Link To Transport\n // -----------------\n\n var Stream$7 = stream;\n var cea708Parser = captionPacketParser;\n var CaptionStream$2 = function (options) {\n options = options || {};\n CaptionStream$2.prototype.init.call(this); // parse708captions flag, default to true\n\n this.parse708captions_ = typeof options.parse708captions === 'boolean' ? options.parse708captions : true;\n this.captionPackets_ = [];\n this.ccStreams_ = [new Cea608Stream(0, 0),\n // eslint-disable-line no-use-before-define\n new Cea608Stream(0, 1),\n // eslint-disable-line no-use-before-define\n new Cea608Stream(1, 0),\n // eslint-disable-line no-use-before-define\n new Cea608Stream(1, 1) // eslint-disable-line no-use-before-define\n ];\n if (this.parse708captions_) {\n this.cc708Stream_ = new Cea708Stream({\n captionServices: options.captionServices\n }); // eslint-disable-line no-use-before-define\n }\n this.reset(); // forward data and done events from CCs to this CaptionStream\n\n this.ccStreams_.forEach(function (cc) {\n cc.on('data', this.trigger.bind(this, 'data'));\n cc.on('partialdone', this.trigger.bind(this, 'partialdone'));\n cc.on('done', this.trigger.bind(this, 'done'));\n }, this);\n if (this.parse708captions_) {\n this.cc708Stream_.on('data', this.trigger.bind(this, 'data'));\n this.cc708Stream_.on('partialdone', this.trigger.bind(this, 'partialdone'));\n this.cc708Stream_.on('done', this.trigger.bind(this, 'done'));\n }\n };\n CaptionStream$2.prototype = new Stream$7();\n CaptionStream$2.prototype.push = function (event) {\n var sei, userData, newCaptionPackets; // only examine SEI NALs\n\n if (event.nalUnitType !== 'sei_rbsp') {\n return;\n } // parse the sei\n\n sei = cea708Parser.parseSei(event.escapedRBSP); // no payload data, skip\n\n if (!sei.payload) {\n return;\n } // ignore everything but user_data_registered_itu_t_t35\n\n if (sei.payloadType !== cea708Parser.USER_DATA_REGISTERED_ITU_T_T35) {\n return;\n } // parse out the user data payload\n\n userData = cea708Parser.parseUserData(sei); // ignore unrecognized userData\n\n if (!userData) {\n return;\n } // Sometimes, the same segment # will be downloaded twice. To stop the\n // caption data from being processed twice, we track the latest dts we've\n // received and ignore everything with a dts before that. However, since\n // data for a specific dts can be split across packets on either side of\n // a segment boundary, we need to make sure we *don't* ignore the packets\n // from the *next* segment that have dts === this.latestDts_. 
By constantly\n // tracking the number of packets received with dts === this.latestDts_, we\n // know how many should be ignored once we start receiving duplicates.\n\n if (event.dts < this.latestDts_) {\n // We've started getting older data, so set the flag.\n this.ignoreNextEqualDts_ = true;\n return;\n } else if (event.dts === this.latestDts_ && this.ignoreNextEqualDts_) {\n this.numSameDts_--;\n if (!this.numSameDts_) {\n // We've received the last duplicate packet, time to start processing again\n this.ignoreNextEqualDts_ = false;\n }\n return;\n } // parse out CC data packets and save them for later\n\n newCaptionPackets = cea708Parser.parseCaptionPackets(event.pts, userData);\n this.captionPackets_ = this.captionPackets_.concat(newCaptionPackets);\n if (this.latestDts_ !== event.dts) {\n this.numSameDts_ = 0;\n }\n this.numSameDts_++;\n this.latestDts_ = event.dts;\n };\n CaptionStream$2.prototype.flushCCStreams = function (flushType) {\n this.ccStreams_.forEach(function (cc) {\n return flushType === 'flush' ? cc.flush() : cc.partialFlush();\n }, this);\n };\n CaptionStream$2.prototype.flushStream = function (flushType) {\n // make sure we actually parsed captions before proceeding\n if (!this.captionPackets_.length) {\n this.flushCCStreams(flushType);\n return;\n } // In Chrome, the Array#sort function is not stable so add a\n // presortIndex that we can use to ensure we get a stable-sort\n\n this.captionPackets_.forEach(function (elem, idx) {\n elem.presortIndex = idx;\n }); // sort caption byte-pairs based on their PTS values\n\n this.captionPackets_.sort(function (a, b) {\n if (a.pts === b.pts) {\n return a.presortIndex - b.presortIndex;\n }\n return a.pts - b.pts;\n });\n this.captionPackets_.forEach(function (packet) {\n if (packet.type < 2) {\n // Dispatch packet to the right Cea608Stream\n this.dispatchCea608Packet(packet);\n } else {\n // Dispatch packet to the Cea708Stream\n this.dispatchCea708Packet(packet);\n }\n }, this);\n this.captionPackets_.length = 0;\n this.flushCCStreams(flushType);\n };\n CaptionStream$2.prototype.flush = function () {\n return this.flushStream('flush');\n }; // Only called if handling partial data\n\n CaptionStream$2.prototype.partialFlush = function () {\n return this.flushStream('partialFlush');\n };\n CaptionStream$2.prototype.reset = function () {\n this.latestDts_ = null;\n this.ignoreNextEqualDts_ = false;\n this.numSameDts_ = 0;\n this.activeCea608Channel_ = [null, null];\n this.ccStreams_.forEach(function (ccStream) {\n ccStream.reset();\n });\n }; // From the CEA-608 spec:\n\n /*\n * When XDS sub-packets are interleaved with other services, the end of each sub-packet shall be followed\n * by a control pair to change to a different service. When any of the control codes from 0x10 to 0x1F is\n * used to begin a control code pair, it indicates the return to captioning or Text data. The control code pair\n * and subsequent data should then be processed according to the FCC rules. It may be necessary for the\n * line 21 data encoder to automatically insert a control code pair (i.e. 
RCL, RU2, RU3, RU4, RDC, or RTD)\n * to switch to captioning or Text.\n */\n // With that in mind, we ignore any data between an XDS control code and a\n // subsequent closed-captioning control code.\n\n CaptionStream$2.prototype.dispatchCea608Packet = function (packet) {\n // NOTE: packet.type is the CEA608 field\n if (this.setsTextOrXDSActive(packet)) {\n this.activeCea608Channel_[packet.type] = null;\n } else if (this.setsChannel1Active(packet)) {\n this.activeCea608Channel_[packet.type] = 0;\n } else if (this.setsChannel2Active(packet)) {\n this.activeCea608Channel_[packet.type] = 1;\n }\n if (this.activeCea608Channel_[packet.type] === null) {\n // If we haven't received anything to set the active channel, or the\n // packets are Text/XDS data, discard the data; we don't want jumbled\n // captions\n return;\n }\n this.ccStreams_[(packet.type << 1) + this.activeCea608Channel_[packet.type]].push(packet);\n };\n CaptionStream$2.prototype.setsChannel1Active = function (packet) {\n return (packet.ccData & 0x7800) === 0x1000;\n };\n CaptionStream$2.prototype.setsChannel2Active = function (packet) {\n return (packet.ccData & 0x7800) === 0x1800;\n };\n CaptionStream$2.prototype.setsTextOrXDSActive = function (packet) {\n return (packet.ccData & 0x7100) === 0x0100 || (packet.ccData & 0x78fe) === 0x102a || (packet.ccData & 0x78fe) === 0x182a;\n };\n CaptionStream$2.prototype.dispatchCea708Packet = function (packet) {\n if (this.parse708captions_) {\n this.cc708Stream_.push(packet);\n }\n }; // ----------------------\n // Session to Application\n // ----------------------\n // This hash maps special and extended character codes to their\n // proper Unicode equivalent. The first one-byte key is just a\n // non-standard character code. The two-byte keys that follow are\n // the extended CEA708 character codes, along with the preceding\n // 0x10 extended character byte to distinguish these codes from\n // non-extended character codes. 
Every CEA708 character code that\n // is not in this object maps directly to a standard unicode\n // character code.\n // The transparent space and non-breaking transparent space are\n // technically not fully supported since there is no code to\n // make them transparent, so they have normal non-transparent\n // stand-ins.\n // The special closed caption (CC) character isn't a standard\n // unicode character, so a fairly similar unicode character was\n // chosen in it's place.\n\n var CHARACTER_TRANSLATION_708 = {\n 0x7f: 0x266a,\n // \u266A\n 0x1020: 0x20,\n // Transparent Space\n 0x1021: 0xa0,\n // Nob-breaking Transparent Space\n 0x1025: 0x2026,\n // \u2026\n 0x102a: 0x0160,\n // \u0160\n 0x102c: 0x0152,\n // \u0152\n 0x1030: 0x2588,\n // \u2588\n 0x1031: 0x2018,\n // \u2018\n 0x1032: 0x2019,\n // \u2019\n 0x1033: 0x201c,\n // \u201C\n 0x1034: 0x201d,\n // \u201D\n 0x1035: 0x2022,\n // \u2022\n 0x1039: 0x2122,\n // \u2122\n 0x103a: 0x0161,\n // \u0161\n 0x103c: 0x0153,\n // \u0153\n 0x103d: 0x2120,\n // \u2120\n 0x103f: 0x0178,\n // \u0178\n 0x1076: 0x215b,\n // \u215B\n 0x1077: 0x215c,\n // \u215C\n 0x1078: 0x215d,\n // \u215D\n 0x1079: 0x215e,\n // \u215E\n 0x107a: 0x23d0,\n // \u23D0\n 0x107b: 0x23a4,\n // \u23A4\n 0x107c: 0x23a3,\n // \u23A3\n 0x107d: 0x23af,\n // \u23AF\n 0x107e: 0x23a6,\n // \u23A6\n 0x107f: 0x23a1,\n // \u23A1\n 0x10a0: 0x3138 // \u3138 (CC char)\n };\n var get708CharFromCode = function (code) {\n var newCode = CHARACTER_TRANSLATION_708[code] || code;\n if (code & 0x1000 && code === newCode) {\n // Invalid extended code\n return '';\n }\n return String.fromCharCode(newCode);\n };\n var within708TextBlock = function (b) {\n return 0x20 <= b && b <= 0x7f || 0xa0 <= b && b <= 0xff;\n };\n var Cea708Window = function (windowNum) {\n this.windowNum = windowNum;\n this.reset();\n };\n Cea708Window.prototype.reset = function () {\n this.clearText();\n this.pendingNewLine = false;\n this.winAttr = {};\n this.penAttr = {};\n this.penLoc = {};\n this.penColor = {}; // These default values are arbitrary,\n // defineWindow will usually override them\n\n this.visible = 0;\n this.rowLock = 0;\n this.columnLock = 0;\n this.priority = 0;\n this.relativePositioning = 0;\n this.anchorVertical = 0;\n this.anchorHorizontal = 0;\n this.anchorPoint = 0;\n this.rowCount = 1;\n this.virtualRowCount = this.rowCount + 1;\n this.columnCount = 41;\n this.windowStyle = 0;\n this.penStyle = 0;\n };\n Cea708Window.prototype.getText = function () {\n return this.rows.join('\\n');\n };\n Cea708Window.prototype.clearText = function () {\n this.rows = [''];\n this.rowIdx = 0;\n };\n Cea708Window.prototype.newLine = function (pts) {\n if (this.rows.length >= this.virtualRowCount && typeof this.beforeRowOverflow === 'function') {\n this.beforeRowOverflow(pts);\n }\n if (this.rows.length > 0) {\n this.rows.push('');\n this.rowIdx++;\n } // Show all virtual rows since there's no visible scrolling\n\n while (this.rows.length > this.virtualRowCount) {\n this.rows.shift();\n this.rowIdx--;\n }\n };\n Cea708Window.prototype.isEmpty = function () {\n if (this.rows.length === 0) {\n return true;\n } else if (this.rows.length === 1) {\n return this.rows[0] === '';\n }\n return false;\n };\n Cea708Window.prototype.addText = function (text) {\n this.rows[this.rowIdx] += text;\n };\n Cea708Window.prototype.backspace = function () {\n if (!this.isEmpty()) {\n var row = this.rows[this.rowIdx];\n this.rows[this.rowIdx] = row.substr(0, row.length - 1);\n }\n };\n var Cea708Service = function (serviceNum, encoding, 
stream) {\n this.serviceNum = serviceNum;\n this.text = '';\n this.currentWindow = new Cea708Window(-1);\n this.windows = [];\n this.stream = stream; // Try to setup a TextDecoder if an `encoding` value was provided\n\n if (typeof encoding === 'string') {\n this.createTextDecoder(encoding);\n }\n };\n /**\n * Initialize service windows\n * Must be run before service use\n *\n * @param {Integer} pts PTS value\n * @param {Function} beforeRowOverflow Function to execute before row overflow of a window\n */\n\n Cea708Service.prototype.init = function (pts, beforeRowOverflow) {\n this.startPts = pts;\n for (var win = 0; win < 8; win++) {\n this.windows[win] = new Cea708Window(win);\n if (typeof beforeRowOverflow === 'function') {\n this.windows[win].beforeRowOverflow = beforeRowOverflow;\n }\n }\n };\n /**\n * Set current window of service to be affected by commands\n *\n * @param {Integer} windowNum Window number\n */\n\n Cea708Service.prototype.setCurrentWindow = function (windowNum) {\n this.currentWindow = this.windows[windowNum];\n };\n /**\n * Try to create a TextDecoder if it is natively supported\n */\n\n Cea708Service.prototype.createTextDecoder = function (encoding) {\n if (typeof TextDecoder === 'undefined') {\n this.stream.trigger('log', {\n level: 'warn',\n message: 'The `encoding` option is unsupported without TextDecoder support'\n });\n } else {\n try {\n this.textDecoder_ = new TextDecoder(encoding);\n } catch (error) {\n this.stream.trigger('log', {\n level: 'warn',\n message: 'TextDecoder could not be created with ' + encoding + ' encoding. ' + error\n });\n }\n }\n };\n var Cea708Stream = function (options) {\n options = options || {};\n Cea708Stream.prototype.init.call(this);\n var self = this;\n var captionServices = options.captionServices || {};\n var captionServiceEncodings = {};\n var serviceProps; // Get service encodings from captionServices option block\n\n Object.keys(captionServices).forEach(serviceName => {\n serviceProps = captionServices[serviceName];\n if (/^SERVICE/.test(serviceName)) {\n captionServiceEncodings[serviceName] = serviceProps.encoding;\n }\n });\n this.serviceEncodings = captionServiceEncodings;\n this.current708Packet = null;\n this.services = {};\n this.push = function (packet) {\n if (packet.type === 3) {\n // 708 packet start\n self.new708Packet();\n self.add708Bytes(packet);\n } else {\n if (self.current708Packet === null) {\n // This should only happen at the start of a file if there's no packet start.\n self.new708Packet();\n }\n self.add708Bytes(packet);\n }\n };\n };\n Cea708Stream.prototype = new Stream$7();\n /**\n * Push current 708 packet, create new 708 packet.\n */\n\n Cea708Stream.prototype.new708Packet = function () {\n if (this.current708Packet !== null) {\n this.push708Packet();\n }\n this.current708Packet = {\n data: [],\n ptsVals: []\n };\n };\n /**\n * Add pts and both bytes from packet into current 708 packet.\n */\n\n Cea708Stream.prototype.add708Bytes = function (packet) {\n var data = packet.ccData;\n var byte0 = data >>> 8;\n var byte1 = data & 0xff; // I would just keep a list of packets instead of bytes, but it isn't clear in the spec\n // that service blocks will always line up with byte pairs.\n\n this.current708Packet.ptsVals.push(packet.pts);\n this.current708Packet.data.push(byte0);\n this.current708Packet.data.push(byte1);\n };\n /**\n * Parse completed 708 packet into service blocks and push each service block.\n */\n\n Cea708Stream.prototype.push708Packet = function () {\n var packet708 = 
this.current708Packet;\n var packetData = packet708.data;\n var serviceNum = null;\n var blockSize = null;\n var i = 0;\n var b = packetData[i++];\n packet708.seq = b >> 6;\n packet708.sizeCode = b & 0x3f; // 0b00111111;\n\n for (; i < packetData.length; i++) {\n b = packetData[i++];\n serviceNum = b >> 5;\n blockSize = b & 0x1f; // 0b00011111\n\n if (serviceNum === 7 && blockSize > 0) {\n // Extended service num\n b = packetData[i++];\n serviceNum = b;\n }\n this.pushServiceBlock(serviceNum, i, blockSize);\n if (blockSize > 0) {\n i += blockSize - 1;\n }\n }\n };\n /**\n * Parse service block, execute commands, read text.\n *\n * Note: While many of these commands serve important purposes,\n * many others just parse out the parameters or attributes, but\n * nothing is done with them because this is not a full and complete\n * implementation of the entire 708 spec.\n *\n * @param {Integer} serviceNum Service number\n * @param {Integer} start Start index of the 708 packet data\n * @param {Integer} size Block size\n */\n\n Cea708Stream.prototype.pushServiceBlock = function (serviceNum, start, size) {\n var b;\n var i = start;\n var packetData = this.current708Packet.data;\n var service = this.services[serviceNum];\n if (!service) {\n service = this.initService(serviceNum, i);\n }\n for (; i < start + size && i < packetData.length; i++) {\n b = packetData[i];\n if (within708TextBlock(b)) {\n i = this.handleText(i, service);\n } else if (b === 0x18) {\n i = this.multiByteCharacter(i, service);\n } else if (b === 0x10) {\n i = this.extendedCommands(i, service);\n } else if (0x80 <= b && b <= 0x87) {\n i = this.setCurrentWindow(i, service);\n } else if (0x98 <= b && b <= 0x9f) {\n i = this.defineWindow(i, service);\n } else if (b === 0x88) {\n i = this.clearWindows(i, service);\n } else if (b === 0x8c) {\n i = this.deleteWindows(i, service);\n } else if (b === 0x89) {\n i = this.displayWindows(i, service);\n } else if (b === 0x8a) {\n i = this.hideWindows(i, service);\n } else if (b === 0x8b) {\n i = this.toggleWindows(i, service);\n } else if (b === 0x97) {\n i = this.setWindowAttributes(i, service);\n } else if (b === 0x90) {\n i = this.setPenAttributes(i, service);\n } else if (b === 0x91) {\n i = this.setPenColor(i, service);\n } else if (b === 0x92) {\n i = this.setPenLocation(i, service);\n } else if (b === 0x8f) {\n service = this.reset(i, service);\n } else if (b === 0x08) {\n // BS: Backspace\n service.currentWindow.backspace();\n } else if (b === 0x0c) {\n // FF: Form feed\n service.currentWindow.clearText();\n } else if (b === 0x0d) {\n // CR: Carriage return\n service.currentWindow.pendingNewLine = true;\n } else if (b === 0x0e) {\n // HCR: Horizontal carriage return\n service.currentWindow.clearText();\n } else if (b === 0x8d) {\n // DLY: Delay, nothing to do\n i++;\n } else ;\n }\n };\n /**\n * Execute an extended command\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.extendedCommands = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n if (within708TextBlock(b)) {\n i = this.handleText(i, service, {\n isExtended: true\n });\n }\n return i;\n };\n /**\n * Get PTS value of a given byte index\n *\n * @param {Integer} byteIndex Index of the byte\n * @return {Integer} PTS\n */\n\n Cea708Stream.prototype.getPts = function (byteIndex) {\n // There's 1 pts value per 2 bytes\n return 
this.current708Packet.ptsVals[Math.floor(byteIndex / 2)];\n };\n /**\n * Initializes a service\n *\n * @param {Integer} serviceNum Service number\n * @return {Service} Initialized service object\n */\n\n Cea708Stream.prototype.initService = function (serviceNum, i) {\n var serviceName = 'SERVICE' + serviceNum;\n var self = this;\n var serviceName;\n var encoding;\n if (serviceName in this.serviceEncodings) {\n encoding = this.serviceEncodings[serviceName];\n }\n this.services[serviceNum] = new Cea708Service(serviceNum, encoding, self);\n this.services[serviceNum].init(this.getPts(i), function (pts) {\n self.flushDisplayed(pts, self.services[serviceNum]);\n });\n return this.services[serviceNum];\n };\n /**\n * Execute text writing to current window\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.handleText = function (i, service, options) {\n var isExtended = options && options.isExtended;\n var isMultiByte = options && options.isMultiByte;\n var packetData = this.current708Packet.data;\n var extended = isExtended ? 0x1000 : 0x0000;\n var currentByte = packetData[i];\n var nextByte = packetData[i + 1];\n var win = service.currentWindow;\n var char;\n var charCodeArray; // Converts an array of bytes to a unicode hex string.\n\n function toHexString(byteArray) {\n return byteArray.map(byte => {\n return ('0' + (byte & 0xFF).toString(16)).slice(-2);\n }).join('');\n }\n if (isMultiByte) {\n charCodeArray = [currentByte, nextByte];\n i++;\n } else {\n charCodeArray = [currentByte];\n } // Use the TextDecoder if one was created for this service\n\n if (service.textDecoder_ && !isExtended) {\n char = service.textDecoder_.decode(new Uint8Array(charCodeArray));\n } else {\n // We assume any multi-byte char without a decoder is unicode.\n if (isMultiByte) {\n const unicode = toHexString(charCodeArray); // Takes a unicode hex string and creates a single character.\n\n char = String.fromCharCode(parseInt(unicode, 16));\n } else {\n char = get708CharFromCode(extended | currentByte);\n }\n }\n if (win.pendingNewLine && !win.isEmpty()) {\n win.newLine(this.getPts(i));\n }\n win.pendingNewLine = false;\n win.addText(char);\n return i;\n };\n /**\n * Handle decoding of multibyte character\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.multiByteCharacter = function (i, service) {\n var packetData = this.current708Packet.data;\n var firstByte = packetData[i + 1];\n var secondByte = packetData[i + 2];\n if (within708TextBlock(firstByte) && within708TextBlock(secondByte)) {\n i = this.handleText(++i, service, {\n isMultiByte: true\n });\n }\n return i;\n };\n /**\n * Parse and execute the CW# command.\n *\n * Set the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setCurrentWindow = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var windowNum = b & 0x07;\n service.setCurrentWindow(windowNum);\n return i;\n };\n /**\n * Parse and execute the DF# command.\n *\n * Define a window and set it as the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} 
service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.defineWindow = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var windowNum = b & 0x07;\n service.setCurrentWindow(windowNum);\n var win = service.currentWindow;\n b = packetData[++i];\n win.visible = (b & 0x20) >> 5; // v\n\n win.rowLock = (b & 0x10) >> 4; // rl\n\n win.columnLock = (b & 0x08) >> 3; // cl\n\n win.priority = b & 0x07; // p\n\n b = packetData[++i];\n win.relativePositioning = (b & 0x80) >> 7; // rp\n\n win.anchorVertical = b & 0x7f; // av\n\n b = packetData[++i];\n win.anchorHorizontal = b; // ah\n\n b = packetData[++i];\n win.anchorPoint = (b & 0xf0) >> 4; // ap\n\n win.rowCount = b & 0x0f; // rc\n\n b = packetData[++i];\n win.columnCount = b & 0x3f; // cc\n\n b = packetData[++i];\n win.windowStyle = (b & 0x38) >> 3; // ws\n\n win.penStyle = b & 0x07; // ps\n // The spec says there are (rowCount+1) \"virtual rows\"\n\n win.virtualRowCount = win.rowCount + 1;\n return i;\n };\n /**\n * Parse and execute the SWA command.\n *\n * Set attributes of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setWindowAttributes = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var winAttr = service.currentWindow.winAttr;\n b = packetData[++i];\n winAttr.fillOpacity = (b & 0xc0) >> 6; // fo\n\n winAttr.fillRed = (b & 0x30) >> 4; // fr\n\n winAttr.fillGreen = (b & 0x0c) >> 2; // fg\n\n winAttr.fillBlue = b & 0x03; // fb\n\n b = packetData[++i];\n winAttr.borderType = (b & 0xc0) >> 6; // bt\n\n winAttr.borderRed = (b & 0x30) >> 4; // br\n\n winAttr.borderGreen = (b & 0x0c) >> 2; // bg\n\n winAttr.borderBlue = b & 0x03; // bb\n\n b = packetData[++i];\n winAttr.borderType += (b & 0x80) >> 5; // bt\n\n winAttr.wordWrap = (b & 0x40) >> 6; // ww\n\n winAttr.printDirection = (b & 0x30) >> 4; // pd\n\n winAttr.scrollDirection = (b & 0x0c) >> 2; // sd\n\n winAttr.justify = b & 0x03; // j\n\n b = packetData[++i];\n winAttr.effectSpeed = (b & 0xf0) >> 4; // es\n\n winAttr.effectDirection = (b & 0x0c) >> 2; // ed\n\n winAttr.displayEffect = b & 0x03; // de\n\n return i;\n };\n /**\n * Gather text from all displayed windows and push a caption to output.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n */\n\n Cea708Stream.prototype.flushDisplayed = function (pts, service) {\n var displayedText = []; // TODO: Positioning not supported, displaying multiple windows will not necessarily\n // display text in the correct order, but sample files so far have not shown any issue.\n\n for (var winId = 0; winId < 8; winId++) {\n if (service.windows[winId].visible && !service.windows[winId].isEmpty()) {\n displayedText.push(service.windows[winId].getText());\n }\n }\n service.endPts = pts;\n service.text = displayedText.join('\\n\\n');\n this.pushCaption(service);\n service.startPts = pts;\n };\n /**\n * Push a caption to output if the caption contains text.\n *\n * @param {Service} service The service object to be affected\n */\n\n Cea708Stream.prototype.pushCaption = function (service) {\n if (service.text !== '') {\n this.trigger('data', {\n startPts: service.startPts,\n endPts: service.endPts,\n text: service.text,\n stream: 'cc708_' + 
service.serviceNum\n });\n service.text = '';\n service.startPts = service.endPts;\n }\n };\n /**\n * Parse and execute the DSW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.displayWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible = 1;\n }\n }\n return i;\n };\n /**\n * Parse and execute the HDW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.hideWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible = 0;\n }\n }\n return i;\n };\n /**\n * Parse and execute the TGW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.toggleWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible ^= 1;\n }\n }\n return i;\n };\n /**\n * Parse and execute the CLW command.\n *\n * Clear text of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.clearWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].clearText();\n }\n }\n return i;\n };\n /**\n * Parse and execute the DLW command.\n *\n * Re-initialize windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.deleteWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].reset();\n }\n }\n return i;\n };\n /**\n * Parse and execute the SPA command.\n *\n * Set pen attributes of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setPenAttributes = function (i, service) {\n var 
packetData = this.current708Packet.data;\n var b = packetData[i];\n var penAttr = service.currentWindow.penAttr;\n b = packetData[++i];\n penAttr.textTag = (b & 0xf0) >> 4; // tt\n\n penAttr.offset = (b & 0x0c) >> 2; // o\n\n penAttr.penSize = b & 0x03; // s\n\n b = packetData[++i];\n penAttr.italics = (b & 0x80) >> 7; // i\n\n penAttr.underline = (b & 0x40) >> 6; // u\n\n penAttr.edgeType = (b & 0x38) >> 3; // et\n\n penAttr.fontStyle = b & 0x07; // fs\n\n return i;\n };\n /**\n * Parse and execute the SPC command.\n *\n * Set pen color of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setPenColor = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penColor = service.currentWindow.penColor;\n b = packetData[++i];\n penColor.fgOpacity = (b & 0xc0) >> 6; // fo\n\n penColor.fgRed = (b & 0x30) >> 4; // fr\n\n penColor.fgGreen = (b & 0x0c) >> 2; // fg\n\n penColor.fgBlue = b & 0x03; // fb\n\n b = packetData[++i];\n penColor.bgOpacity = (b & 0xc0) >> 6; // bo\n\n penColor.bgRed = (b & 0x30) >> 4; // br\n\n penColor.bgGreen = (b & 0x0c) >> 2; // bg\n\n penColor.bgBlue = b & 0x03; // bb\n\n b = packetData[++i];\n penColor.edgeRed = (b & 0x30) >> 4; // er\n\n penColor.edgeGreen = (b & 0x0c) >> 2; // eg\n\n penColor.edgeBlue = b & 0x03; // eb\n\n return i;\n };\n /**\n * Parse and execute the SPL command.\n *\n * Set pen location of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setPenLocation = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penLoc = service.currentWindow.penLoc; // Positioning isn't really supported at the moment, so this essentially just inserts a linebreak\n\n service.currentWindow.pendingNewLine = true;\n b = packetData[++i];\n penLoc.row = b & 0x0f; // r\n\n b = packetData[++i];\n penLoc.column = b & 0x3f; // c\n\n return i;\n };\n /**\n * Execute the RST command.\n *\n * Reset service to a clean slate. Re-initialize.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Service} Re-initialized service\n */\n\n Cea708Stream.prototype.reset = function (i, service) {\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n return this.initService(service.serviceNum, i);\n }; // This hash maps non-ASCII, special, and extended character codes to their\n // proper Unicode equivalent. The first keys that are only a single byte\n // are the non-standard ASCII characters, which simply map the CEA608 byte\n // to the standard ASCII/Unicode. 
The two-byte keys that follow are the CEA608\n // character codes, but have their MSB bitmasked with 0x03 so that a lookup\n // can be performed regardless of the field and data channel on which the\n // character code was received.\n\n var CHARACTER_TRANSLATION = {\n 0x2a: 0xe1,\n // \u00E1\n 0x5c: 0xe9,\n // \u00E9\n 0x5e: 0xed,\n // \u00ED\n 0x5f: 0xf3,\n // \u00F3\n 0x60: 0xfa,\n // \u00FA\n 0x7b: 0xe7,\n // \u00E7\n 0x7c: 0xf7,\n // \u00F7\n 0x7d: 0xd1,\n // \u00D1\n 0x7e: 0xf1,\n // \u00F1\n 0x7f: 0x2588,\n // \u2588\n 0x0130: 0xae,\n // \u00AE\n 0x0131: 0xb0,\n // \u00B0\n 0x0132: 0xbd,\n // \u00BD\n 0x0133: 0xbf,\n // \u00BF\n 0x0134: 0x2122,\n // \u2122\n 0x0135: 0xa2,\n // \u00A2\n 0x0136: 0xa3,\n // \u00A3\n 0x0137: 0x266a,\n // \u266A\n 0x0138: 0xe0,\n // \u00E0\n 0x0139: 0xa0,\n //\n 0x013a: 0xe8,\n // \u00E8\n 0x013b: 0xe2,\n // \u00E2\n 0x013c: 0xea,\n // \u00EA\n 0x013d: 0xee,\n // \u00EE\n 0x013e: 0xf4,\n // \u00F4\n 0x013f: 0xfb,\n // \u00FB\n 0x0220: 0xc1,\n // \u00C1\n 0x0221: 0xc9,\n // \u00C9\n 0x0222: 0xd3,\n // \u00D3\n 0x0223: 0xda,\n // \u00DA\n 0x0224: 0xdc,\n // \u00DC\n 0x0225: 0xfc,\n // \u00FC\n 0x0226: 0x2018,\n // \u2018\n 0x0227: 0xa1,\n // \u00A1\n 0x0228: 0x2a,\n // *\n 0x0229: 0x27,\n // '\n 0x022a: 0x2014,\n // \u2014\n 0x022b: 0xa9,\n // \u00A9\n 0x022c: 0x2120,\n // \u2120\n 0x022d: 0x2022,\n // \u2022\n 0x022e: 0x201c,\n // \u201C\n 0x022f: 0x201d,\n // \u201D\n 0x0230: 0xc0,\n // \u00C0\n 0x0231: 0xc2,\n // \u00C2\n 0x0232: 0xc7,\n // \u00C7\n 0x0233: 0xc8,\n // \u00C8\n 0x0234: 0xca,\n // \u00CA\n 0x0235: 0xcb,\n // \u00CB\n 0x0236: 0xeb,\n // \u00EB\n 0x0237: 0xce,\n // \u00CE\n 0x0238: 0xcf,\n // \u00CF\n 0x0239: 0xef,\n // \u00EF\n 0x023a: 0xd4,\n // \u00D4\n 0x023b: 0xd9,\n // \u00D9\n 0x023c: 0xf9,\n // \u00F9\n 0x023d: 0xdb,\n // \u00DB\n 0x023e: 0xab,\n // \u00AB\n 0x023f: 0xbb,\n // \u00BB\n 0x0320: 0xc3,\n // \u00C3\n 0x0321: 0xe3,\n // \u00E3\n 0x0322: 0xcd,\n // \u00CD\n 0x0323: 0xcc,\n // \u00CC\n 0x0324: 0xec,\n // \u00EC\n 0x0325: 0xd2,\n // \u00D2\n 0x0326: 0xf2,\n // \u00F2\n 0x0327: 0xd5,\n // \u00D5\n 0x0328: 0xf5,\n // \u00F5\n 0x0329: 0x7b,\n // {\n 0x032a: 0x7d,\n // }\n 0x032b: 0x5c,\n // \\\n 0x032c: 0x5e,\n // ^\n 0x032d: 0x5f,\n // _\n 0x032e: 0x7c,\n // |\n 0x032f: 0x7e,\n // ~\n 0x0330: 0xc4,\n // \u00C4\n 0x0331: 0xe4,\n // \u00E4\n 0x0332: 0xd6,\n // \u00D6\n 0x0333: 0xf6,\n // \u00F6\n 0x0334: 0xdf,\n // \u00DF\n 0x0335: 0xa5,\n // \u00A5\n 0x0336: 0xa4,\n // \u00A4\n 0x0337: 0x2502,\n // \u2502\n 0x0338: 0xc5,\n // \u00C5\n 0x0339: 0xe5,\n // \u00E5\n 0x033a: 0xd8,\n // \u00D8\n 0x033b: 0xf8,\n // \u00F8\n 0x033c: 0x250c,\n // \u250C\n 0x033d: 0x2510,\n // \u2510\n 0x033e: 0x2514,\n // \u2514\n 0x033f: 0x2518 // \u2518\n };\n var getCharFromCode = function (code) {\n if (code === null) {\n return '';\n }\n code = CHARACTER_TRANSLATION[code] || code;\n return String.fromCharCode(code);\n }; // the index of the last row in a CEA-608 display buffer\n\n var BOTTOM_ROW = 14; // This array is used for mapping PACs -> row #, since there's no way of\n // getting it through bit logic.\n\n var ROWS = [0x1100, 0x1120, 0x1200, 0x1220, 0x1500, 0x1520, 0x1600, 0x1620, 0x1700, 0x1720, 0x1000, 0x1300, 0x1320, 0x1400, 0x1420]; // CEA-608 captions are rendered onto a 34x15 matrix of character\n // cells. 
The \"bottom\" row is the last element in the outer array.\n // We keep track of positioning information as we go by storing the\n // number of indentations and the tab offset in this buffer.\n\n var createDisplayBuffer = function () {\n var result = [],\n i = BOTTOM_ROW + 1;\n while (i--) {\n result.push({\n text: '',\n indent: 0,\n offset: 0\n });\n }\n return result;\n };\n var Cea608Stream = function (field, dataChannel) {\n Cea608Stream.prototype.init.call(this);\n this.field_ = field || 0;\n this.dataChannel_ = dataChannel || 0;\n this.name_ = 'CC' + ((this.field_ << 1 | this.dataChannel_) + 1);\n this.setConstants();\n this.reset();\n this.push = function (packet) {\n var data, swap, char0, char1, text; // remove the parity bits\n\n data = packet.ccData & 0x7f7f; // ignore duplicate control codes; the spec demands they're sent twice\n\n if (data === this.lastControlCode_) {\n this.lastControlCode_ = null;\n return;\n } // Store control codes\n\n if ((data & 0xf000) === 0x1000) {\n this.lastControlCode_ = data;\n } else if (data !== this.PADDING_) {\n this.lastControlCode_ = null;\n }\n char0 = data >>> 8;\n char1 = data & 0xff;\n if (data === this.PADDING_) {\n return;\n } else if (data === this.RESUME_CAPTION_LOADING_) {\n this.mode_ = 'popOn';\n } else if (data === this.END_OF_CAPTION_) {\n // If an EOC is received while in paint-on mode, the displayed caption\n // text should be swapped to non-displayed memory as if it was a pop-on\n // caption. Because of that, we should explicitly switch back to pop-on\n // mode\n this.mode_ = 'popOn';\n this.clearFormatting(packet.pts); // if a caption was being displayed, it's gone now\n\n this.flushDisplayed(packet.pts); // flip memory\n\n swap = this.displayed_;\n this.displayed_ = this.nonDisplayed_;\n this.nonDisplayed_ = swap; // start measuring the time to display the caption\n\n this.startPts_ = packet.pts;\n } else if (data === this.ROLL_UP_2_ROWS_) {\n this.rollUpRows_ = 2;\n this.setRollUp(packet.pts);\n } else if (data === this.ROLL_UP_3_ROWS_) {\n this.rollUpRows_ = 3;\n this.setRollUp(packet.pts);\n } else if (data === this.ROLL_UP_4_ROWS_) {\n this.rollUpRows_ = 4;\n this.setRollUp(packet.pts);\n } else if (data === this.CARRIAGE_RETURN_) {\n this.clearFormatting(packet.pts);\n this.flushDisplayed(packet.pts);\n this.shiftRowsUp_();\n this.startPts_ = packet.pts;\n } else if (data === this.BACKSPACE_) {\n if (this.mode_ === 'popOn') {\n this.nonDisplayed_[this.row_].text = this.nonDisplayed_[this.row_].text.slice(0, -1);\n } else {\n this.displayed_[this.row_].text = this.displayed_[this.row_].text.slice(0, -1);\n }\n } else if (data === this.ERASE_DISPLAYED_MEMORY_) {\n this.flushDisplayed(packet.pts);\n this.displayed_ = createDisplayBuffer();\n } else if (data === this.ERASE_NON_DISPLAYED_MEMORY_) {\n this.nonDisplayed_ = createDisplayBuffer();\n } else if (data === this.RESUME_DIRECT_CAPTIONING_) {\n if (this.mode_ !== 'paintOn') {\n // NOTE: This should be removed when proper caption positioning is\n // implemented\n this.flushDisplayed(packet.pts);\n this.displayed_ = createDisplayBuffer();\n }\n this.mode_ = 'paintOn';\n this.startPts_ = packet.pts; // Append special characters to caption text\n } else if (this.isSpecialCharacter(char0, char1)) {\n // Bitmask char0 so that we can apply character transformations\n // regardless of field and data channel.\n // Then byte-shift to the left and OR with char1 so we can pass the\n // entire character code to `getCharFromCode`.\n char0 = (char0 & 0x03) << 8;\n text = 
getCharFromCode(char0 | char1);\n this[this.mode_](packet.pts, text);\n this.column_++; // Append extended characters to caption text\n } else if (this.isExtCharacter(char0, char1)) {\n // Extended characters always follow their \"non-extended\" equivalents.\n // IE if a \"\u00E8\" is desired, you'll always receive \"e\u00E8\"; non-compliant\n // decoders are supposed to drop the \"\u00E8\", while compliant decoders\n // backspace the \"e\" and insert \"\u00E8\".\n // Delete the previous character\n if (this.mode_ === 'popOn') {\n this.nonDisplayed_[this.row_].text = this.nonDisplayed_[this.row_].text.slice(0, -1);\n } else {\n this.displayed_[this.row_].text = this.displayed_[this.row_].text.slice(0, -1);\n } // Bitmask char0 so that we can apply character transformations\n // regardless of field and data channel.\n // Then byte-shift to the left and OR with char1 so we can pass the\n // entire character code to `getCharFromCode`.\n\n char0 = (char0 & 0x03) << 8;\n text = getCharFromCode(char0 | char1);\n this[this.mode_](packet.pts, text);\n this.column_++; // Process mid-row codes\n } else if (this.isMidRowCode(char0, char1)) {\n // Attributes are not additive, so clear all formatting\n this.clearFormatting(packet.pts); // According to the standard, mid-row codes\n // should be replaced with spaces, so add one now\n\n this[this.mode_](packet.pts, ' ');\n this.column_++;\n if ((char1 & 0xe) === 0xe) {\n this.addFormatting(packet.pts, ['i']);\n }\n if ((char1 & 0x1) === 0x1) {\n this.addFormatting(packet.pts, ['u']);\n } // Detect offset control codes and adjust cursor\n } else if (this.isOffsetControlCode(char0, char1)) {\n // Cursor position is set by indent PAC (see below) in 4-column\n // increments, with an additional offset code of 1-3 to reach any\n // of the 32 columns specified by CEA-608. So all we need to do\n // here is increment the column cursor by the given offset.\n const offset = char1 & 0x03; // For an offest value 1-3, set the offset for that caption\n // in the non-displayed array.\n\n this.nonDisplayed_[this.row_].offset = offset;\n this.column_ += offset; // Detect PACs (Preamble Address Codes)\n } else if (this.isPAC(char0, char1)) {\n // There's no logic for PAC -> row mapping, so we have to just\n // find the row code in an array and use its index :(\n var row = ROWS.indexOf(data & 0x1f20); // Configure the caption window if we're in roll-up mode\n\n if (this.mode_ === 'rollUp') {\n // This implies that the base row is incorrectly set.\n // As per the recommendation in CEA-608(Base Row Implementation), defer to the number\n // of roll-up rows set.\n if (row - this.rollUpRows_ + 1 < 0) {\n row = this.rollUpRows_ - 1;\n }\n this.setRollUp(packet.pts, row);\n } // Ensure the row is between 0 and 14, otherwise use the most\n // recent or default row.\n\n if (row !== this.row_ && row >= 0 && row <= 14) {\n // formatting is only persistent for current row\n this.clearFormatting(packet.pts);\n this.row_ = row;\n } // All PACs can apply underline, so detect and apply\n // (All odd-numbered second bytes set underline)\n\n if (char1 & 0x1 && this.formatting_.indexOf('u') === -1) {\n this.addFormatting(packet.pts, ['u']);\n }\n if ((data & 0x10) === 0x10) {\n // We've got an indent level code. 
Each successive even number\n // increments the column cursor by 4, so we can get the desired\n // column position by bit-shifting to the right (to get n/2)\n // and multiplying by 4.\n const indentations = (data & 0xe) >> 1;\n this.column_ = indentations * 4; // add to the number of indentations for positioning\n\n this.nonDisplayed_[this.row_].indent += indentations;\n }\n if (this.isColorPAC(char1)) {\n // it's a color code, though we only support white, which\n // can be either normal or italicized. white italics can be\n // either 0x4e or 0x6e depending on the row, so we just\n // bitwise-and with 0xe to see if italics should be turned on\n if ((char1 & 0xe) === 0xe) {\n this.addFormatting(packet.pts, ['i']);\n }\n } // We have a normal character in char0, and possibly one in char1\n } else if (this.isNormalChar(char0)) {\n if (char1 === 0x00) {\n char1 = null;\n }\n text = getCharFromCode(char0);\n text += getCharFromCode(char1);\n this[this.mode_](packet.pts, text);\n this.column_ += text.length;\n } // finish data processing\n };\n };\n Cea608Stream.prototype = new Stream$7(); // Trigger a cue point that captures the current state of the\n // display buffer\n\n Cea608Stream.prototype.flushDisplayed = function (pts) {\n const logWarning = index => {\n this.trigger('log', {\n level: 'warn',\n message: 'Skipping a malformed 608 caption at index ' + index + '.'\n });\n };\n const content = [];\n this.displayed_.forEach((row, i) => {\n if (row && row.text && row.text.length) {\n try {\n // remove spaces from the start and end of the string\n row.text = row.text.trim();\n } catch (e) {\n // Ordinarily, this shouldn't happen. However, caption\n // parsing errors should not throw exceptions and\n // break playback.\n logWarning(i);\n } // See the below link for more details on the following fields:\n // https://dvcs.w3.org/hg/text-tracks/raw-file/default/608toVTT/608toVTT.html#positioning-in-cea-608\n\n if (row.text.length) {\n content.push({\n // The text to be displayed in the caption from this specific row, with whitespace removed.\n text: row.text,\n // Value between 1 and 15 representing the PAC row used to calculate line height.\n line: i + 1,\n // A number representing the indent position by percentage (CEA-608 PAC indent code).\n // The value will be a number between 10 and 80. Offset is used to add an aditional\n // value to the position if necessary.\n position: 10 + Math.min(70, row.indent * 10) + row.offset * 2.5\n });\n }\n } else if (row === undefined || row === null) {\n logWarning(i);\n }\n });\n if (content.length) {\n this.trigger('data', {\n startPts: this.startPts_,\n endPts: pts,\n content,\n stream: this.name_\n });\n }\n };\n /**\n * Zero out the data, used for startup and on seek\n */\n\n Cea608Stream.prototype.reset = function () {\n this.mode_ = 'popOn'; // When in roll-up mode, the index of the last row that will\n // actually display captions. 
If a caption is shifted to a row\n // with a lower index than this, it is cleared from the display\n // buffer\n\n this.topRow_ = 0;\n this.startPts_ = 0;\n this.displayed_ = createDisplayBuffer();\n this.nonDisplayed_ = createDisplayBuffer();\n this.lastControlCode_ = null; // Track row and column for proper line-breaking and spacing\n\n this.column_ = 0;\n this.row_ = BOTTOM_ROW;\n this.rollUpRows_ = 2; // This variable holds currently-applied formatting\n\n this.formatting_ = [];\n };\n /**\n * Sets up control code and related constants for this instance\n */\n\n Cea608Stream.prototype.setConstants = function () {\n // The following attributes have these uses:\n // ext_ : char0 for mid-row codes, and the base for extended\n // chars (ext_+0, ext_+1, and ext_+2 are char0s for\n // extended codes)\n // control_: char0 for control codes, except byte-shifted to the\n // left so that we can do this.control_ | CONTROL_CODE\n // offset_: char0 for tab offset codes\n //\n // It's also worth noting that control codes, and _only_ control codes,\n // differ between field 1 and field2. Field 2 control codes are always\n // their field 1 value plus 1. That's why there's the \"| field\" on the\n // control value.\n if (this.dataChannel_ === 0) {\n this.BASE_ = 0x10;\n this.EXT_ = 0x11;\n this.CONTROL_ = (0x14 | this.field_) << 8;\n this.OFFSET_ = 0x17;\n } else if (this.dataChannel_ === 1) {\n this.BASE_ = 0x18;\n this.EXT_ = 0x19;\n this.CONTROL_ = (0x1c | this.field_) << 8;\n this.OFFSET_ = 0x1f;\n } // Constants for the LSByte command codes recognized by Cea608Stream. This\n // list is not exhaustive. For a more comprehensive listing and semantics see\n // http://www.gpo.gov/fdsys/pkg/CFR-2010-title47-vol1/pdf/CFR-2010-title47-vol1-sec15-119.pdf\n // Padding\n\n this.PADDING_ = 0x0000; // Pop-on Mode\n\n this.RESUME_CAPTION_LOADING_ = this.CONTROL_ | 0x20;\n this.END_OF_CAPTION_ = this.CONTROL_ | 0x2f; // Roll-up Mode\n\n this.ROLL_UP_2_ROWS_ = this.CONTROL_ | 0x25;\n this.ROLL_UP_3_ROWS_ = this.CONTROL_ | 0x26;\n this.ROLL_UP_4_ROWS_ = this.CONTROL_ | 0x27;\n this.CARRIAGE_RETURN_ = this.CONTROL_ | 0x2d; // paint-on mode\n\n this.RESUME_DIRECT_CAPTIONING_ = this.CONTROL_ | 0x29; // Erasure\n\n this.BACKSPACE_ = this.CONTROL_ | 0x21;\n this.ERASE_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2c;\n this.ERASE_NON_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2e;\n };\n /**\n * Detects if the 2-byte packet data is a special character\n *\n * Special characters have a second byte in the range 0x30 to 0x3f,\n * with the first byte being 0x11 (for data channel 1) or 0x19 (for\n * data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an special character\n */\n\n Cea608Stream.prototype.isSpecialCharacter = function (char0, char1) {\n return char0 === this.EXT_ && char1 >= 0x30 && char1 <= 0x3f;\n };\n /**\n * Detects if the 2-byte packet data is an extended character\n *\n * Extended characters have a second byte in the range 0x20 to 0x3f,\n * with the first byte being 0x12 or 0x13 (for data channel 1) or\n * 0x1a or 0x1b (for data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an extended character\n */\n\n Cea608Stream.prototype.isExtCharacter = function (char0, char1) {\n return (char0 === this.EXT_ + 1 || char0 === this.EXT_ + 2) && char1 >= 0x20 && char1 <= 0x3f;\n };\n /**\n * Detects if the 2-byte packet is a mid-row 
code\n *\n * Mid-row codes have a second byte in the range 0x20 to 0x2f, with\n * the first byte being 0x11 (for data channel 1) or 0x19 (for data\n * channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are a mid-row code\n */\n\n Cea608Stream.prototype.isMidRowCode = function (char0, char1) {\n return char0 === this.EXT_ && char1 >= 0x20 && char1 <= 0x2f;\n };\n /**\n * Detects if the 2-byte packet is an offset control code\n *\n * Offset control codes have a second byte in the range 0x21 to 0x23,\n * with the first byte being 0x17 (for data channel 1) or 0x1f (for\n * data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an offset control code\n */\n\n Cea608Stream.prototype.isOffsetControlCode = function (char0, char1) {\n return char0 === this.OFFSET_ && char1 >= 0x21 && char1 <= 0x23;\n };\n /**\n * Detects if the 2-byte packet is a Preamble Address Code\n *\n * PACs have a first byte in the range 0x10 to 0x17 (for data channel 1)\n * or 0x18 to 0x1f (for data channel 2), with the second byte in the\n * range 0x40 to 0x7f.\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are a PAC\n */\n\n Cea608Stream.prototype.isPAC = function (char0, char1) {\n return char0 >= this.BASE_ && char0 < this.BASE_ + 8 && char1 >= 0x40 && char1 <= 0x7f;\n };\n /**\n * Detects if a packet's second byte is in the range of a PAC color code\n *\n * PAC color codes have the second byte be in the range 0x40 to 0x4f, or\n * 0x60 to 0x6f.\n *\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the byte is a color PAC\n */\n\n Cea608Stream.prototype.isColorPAC = function (char1) {\n return char1 >= 0x40 && char1 <= 0x4f || char1 >= 0x60 && char1 <= 0x7f;\n };\n /**\n * Detects if a single byte is in the range of a normal character\n *\n * Normal text bytes are in the range 0x20 to 0x7f.\n *\n * @param {Integer} char The byte\n * @return {Boolean} Whether the byte is a normal character\n */\n\n Cea608Stream.prototype.isNormalChar = function (char) {\n return char >= 0x20 && char <= 0x7f;\n };\n /**\n * Configures roll-up\n *\n * @param {Integer} pts Current PTS\n * @param {Integer} newBaseRow Used by PACs to slide the current window to\n * a new position\n */\n\n Cea608Stream.prototype.setRollUp = function (pts, newBaseRow) {\n // Reset the base row to the bottom row when switching modes\n if (this.mode_ !== 'rollUp') {\n this.row_ = BOTTOM_ROW;\n this.mode_ = 'rollUp'; // Spec says to wipe memories when switching to roll-up\n\n this.flushDisplayed(pts);\n this.nonDisplayed_ = createDisplayBuffer();\n this.displayed_ = createDisplayBuffer();\n }\n if (newBaseRow !== undefined && newBaseRow !== this.row_) {\n // move currently displayed captions (up or down) to the new base row\n for (var i = 0; i < this.rollUpRows_; i++) {\n this.displayed_[newBaseRow - i] = this.displayed_[this.row_ - i];\n this.displayed_[this.row_ - i] = {\n text: '',\n indent: 0,\n offset: 0\n };\n }\n }\n if (newBaseRow === undefined) {\n newBaseRow = this.row_;\n }\n this.topRow_ = newBaseRow - this.rollUpRows_ + 1;\n }; // Adds the opening HTML tag for the passed character to the caption text,\n // and keeps track of it for later closing\n\n Cea608Stream.prototype.addFormatting = function (pts, format) {\n this.formatting_ = 
this.formatting_.concat(format);\n var text = format.reduce(function (text, format) {\n return text + '<' + format + '>';\n }, '');\n this[this.mode_](pts, text);\n }; // Adds HTML closing tags for current formatting to caption text and\n // clears remembered formatting\n\n Cea608Stream.prototype.clearFormatting = function (pts) {\n if (!this.formatting_.length) {\n return;\n }\n var text = this.formatting_.reverse().reduce(function (text, format) {\n return text + '</' + format + '>';\n }, '');\n this.formatting_ = [];\n this[this.mode_](pts, text);\n }; // Mode Implementations\n\n Cea608Stream.prototype.popOn = function (pts, text) {\n var baseRow = this.nonDisplayed_[this.row_].text; // buffer characters\n\n baseRow += text;\n this.nonDisplayed_[this.row_].text = baseRow;\n };\n Cea608Stream.prototype.rollUp = function (pts, text) {\n var baseRow = this.displayed_[this.row_].text;\n baseRow += text;\n this.displayed_[this.row_].text = baseRow;\n };\n Cea608Stream.prototype.shiftRowsUp_ = function () {\n var i; // clear out inactive rows\n\n for (i = 0; i < this.topRow_; i++) {\n this.displayed_[i] = {\n text: '',\n indent: 0,\n offset: 0\n };\n }\n for (i = this.row_ + 1; i < BOTTOM_ROW + 1; i++) {\n this.displayed_[i] = {\n text: '',\n indent: 0,\n offset: 0\n };\n } // shift displayed rows up\n\n for (i = this.topRow_; i < this.row_; i++) {\n this.displayed_[i] = this.displayed_[i + 1];\n } // clear out the bottom row\n\n this.displayed_[this.row_] = {\n text: '',\n indent: 0,\n offset: 0\n };\n };\n Cea608Stream.prototype.paintOn = function (pts, text) {\n var baseRow = this.displayed_[this.row_].text;\n baseRow += text;\n this.displayed_[this.row_].text = baseRow;\n }; // exports\n\n var captionStream = {\n CaptionStream: CaptionStream$2,\n Cea608Stream: Cea608Stream,\n Cea708Stream: Cea708Stream\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var streamTypes = {\n H264_STREAM_TYPE: 0x1B,\n ADTS_STREAM_TYPE: 0x0F,\n METADATA_STREAM_TYPE: 0x15\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Accepts program elementary stream (PES) data events and corrects\n * decode and presentation time stamps to account for a rollover\n * of the 33-bit value.\n */\n\n var Stream$6 = stream;\n var MAX_TS = 8589934592;\n var RO_THRESH = 4294967296;\n var TYPE_SHARED = 'shared';\n var handleRollover$1 = function (value, reference) {\n var direction = 1;\n if (value > reference) {\n // If the current timestamp value is greater than our reference timestamp and we detect a\n // timestamp rollover, this means the roll over is happening in the opposite direction.\n // Example scenario: Enter a long stream/video just after a rollover occurred. The reference\n // point will be set to a small number, e.g. 1. The user then seeks backwards over the\n // rollover point. In loading this segment, the timestamp values will be very large,\n // e.g. 2^33 - 1. 
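(2^33 is 8589934592, the MAX_TS constant above.) 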
Since this comes before the data we loaded previously, we want to adjust\n // the time stamp to be `value - 2^33`.\n direction = -1;\n } // Note: A seek forwards or back that is greater than the RO_THRESH (2^32, ~13 hours) will\n // cause an incorrect adjustment.\n\n while (Math.abs(reference - value) > RO_THRESH) {\n value += direction * MAX_TS;\n }\n return value;\n };\n var TimestampRolloverStream$1 = function (type) {\n var lastDTS, referenceDTS;\n TimestampRolloverStream$1.prototype.init.call(this); // The \"shared\" type is used in cases where a stream will contain muxed\n // video and audio. We could use `undefined` here, but having a string\n // makes debugging a little clearer.\n\n this.type_ = type || TYPE_SHARED;\n this.push = function (data) {\n /**\n * Rollover stream expects data from elementary stream.\n * Elementary stream can push forward 2 types of data\n * - Parsed Video/Audio/Timed-metadata PES (packetized elementary stream) packets\n * - Tracks metadata from PMT (Program Map Table)\n * Rollover stream expects pts/dts info to be available, since it stores lastDTS\n * We should ignore non-PES packets since they may override lastDTS to undefined.\n * lastDTS is important to signal the next segments\n * about rollover from the previous segments.\n */\n if (data.type === 'metadata') {\n this.trigger('data', data);\n return;\n } // Any \"shared\" rollover streams will accept _all_ data. Otherwise,\n // streams will only accept data that matches their type.\n\n if (this.type_ !== TYPE_SHARED && data.type !== this.type_) {\n return;\n }\n if (referenceDTS === undefined) {\n referenceDTS = data.dts;\n }\n data.dts = handleRollover$1(data.dts, referenceDTS);\n data.pts = handleRollover$1(data.pts, referenceDTS);\n lastDTS = data.dts;\n this.trigger('data', data);\n };\n this.flush = function () {\n referenceDTS = lastDTS;\n this.trigger('done');\n };\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n this.discontinuity = function () {\n referenceDTS = void 0;\n lastDTS = void 0;\n };\n this.reset = function () {\n this.discontinuity();\n this.trigger('reset');\n };\n };\n TimestampRolloverStream$1.prototype = new Stream$6();\n var timestampRolloverStream = {\n TimestampRolloverStream: TimestampRolloverStream$1,\n handleRollover: handleRollover$1\n }; // Once IE11 support is dropped, this function should be removed.\n\n var typedArrayIndexOf$1 = (typedArray, element, fromIndex) => {\n if (!typedArray) {\n return -1;\n }\n var currentIndex = fromIndex;\n for (; currentIndex < typedArray.length; currentIndex++) {\n if (typedArray[currentIndex] === element) {\n return currentIndex;\n }\n }\n return -1;\n };\n var typedArray = {\n typedArrayIndexOf: typedArrayIndexOf$1\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Tools for parsing ID3 frame data\n * @see http://id3.org/id3v2.3.0\n */\n\n var typedArrayIndexOf = typedArray.typedArrayIndexOf,\n // Frames that allow different types of text encoding contain a text\n // encoding description byte [ID3v2.4.0 section 4.]\n textEncodingDescriptionByte = {\n Iso88591: 0x00,\n // ISO-8859-1, terminated with \\0.\n Utf16: 0x01,\n // UTF-16 encoded Unicode BOM, terminated with \\0\\0\n Utf16be: 0x02,\n // UTF-16BE encoded Unicode, without BOM, terminated with \\0\\0\n Utf8: 0x03 // UTF-8 encoded Unicode, terminated with \\0\n },\n // return a percent-encoded representation of the specified byte range\n // @see 
http://en.wikipedia.org/wiki/Percent-encoding \n percentEncode$1 = function (bytes, start, end) {\n var i,\n result = '';\n for (i = start; i < end; i++) {\n result += '%' + ('00' + bytes[i].toString(16)).slice(-2);\n }\n return result;\n },\n // return the string representation of the specified byte range,\n // interpreted as UTF-8.\n parseUtf8 = function (bytes, start, end) {\n return decodeURIComponent(percentEncode$1(bytes, start, end));\n },\n // return the string representation of the specified byte range,\n // interpreted as ISO-8859-1.\n parseIso88591$1 = function (bytes, start, end) {\n return unescape(percentEncode$1(bytes, start, end)); // jshint ignore:line\n },\n parseSyncSafeInteger$1 = function (data) {\n return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];\n },\n frameParsers = {\n 'APIC': function (frame) {\n var i = 1,\n mimeTypeEndIndex,\n descriptionEndIndex,\n LINK_MIME_TYPE = '-->';\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n } // parsing fields [ID3v2.4.0 section 4.14.]\n\n mimeTypeEndIndex = typedArrayIndexOf(frame.data, 0, i);\n if (mimeTypeEndIndex < 0) {\n // malformed frame\n return;\n } // parsing Mime type field (terminated with \\0)\n\n frame.mimeType = parseIso88591$1(frame.data, i, mimeTypeEndIndex);\n i = mimeTypeEndIndex + 1; // parsing 1-byte Picture Type field\n\n frame.pictureType = frame.data[i];\n i++;\n descriptionEndIndex = typedArrayIndexOf(frame.data, 0, i);\n if (descriptionEndIndex < 0) {\n // malformed frame\n return;\n } // parsing Description field (terminated with \\0)\n\n frame.description = parseUtf8(frame.data, i, descriptionEndIndex);\n i = descriptionEndIndex + 1;\n if (frame.mimeType === LINK_MIME_TYPE) {\n // parsing Picture Data field as URL (always represented as ISO-8859-1 [ID3v2.4.0 section 4.])\n frame.url = parseIso88591$1(frame.data, i, frame.data.length);\n } else {\n // parsing Picture Data field as binary data\n frame.pictureData = frame.data.subarray(i, frame.data.length);\n }\n },\n 'T*': function (frame) {\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n } // parse text field, do not include null terminator in the frame value\n // frames that allow different types of encoding contain terminated text [ID3v2.4.0 section 4.]\n\n frame.value = parseUtf8(frame.data, 1, frame.data.length).replace(/\\0*$/, ''); // text information frames support multiple strings, stored as a terminator separated list [ID3v2.4.0 section 4.2.]\n\n frame.values = frame.value.split('\\0');\n },\n 'TXXX': function (frame) {\n var descriptionEndIndex;\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n }\n descriptionEndIndex = typedArrayIndexOf(frame.data, 0, 1);\n if (descriptionEndIndex === -1) {\n return;\n } // parse the text fields\n\n frame.description = parseUtf8(frame.data, 1, descriptionEndIndex); // do not include the null terminator in the tag value\n // frames that allow different types of encoding contain terminated text\n // [ID3v2.4.0 section 4.]\n\n frame.value = parseUtf8(frame.data, descriptionEndIndex + 1, frame.data.length).replace(/\\0*$/, '');\n frame.data = frame.value;\n },\n 'W*': function (frame) {\n // parse URL field; URL fields are always represented as ISO-8859-1 [ID3v2.4.0 section 4.]\n // if the value is followed by a string termination, all the following information 
should be ignored [ID3v2.4.0 section 4.3]\n frame.url = parseIso88591$1(frame.data, 0, frame.data.length).replace(/\\0.*$/, '');\n },\n 'WXXX': function (frame) {\n var descriptionEndIndex;\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n }\n descriptionEndIndex = typedArrayIndexOf(frame.data, 0, 1);\n if (descriptionEndIndex === -1) {\n return;\n } // parse the description and URL fields\n\n frame.description = parseUtf8(frame.data, 1, descriptionEndIndex); // URL fields are always represented as ISO-8859-1 [ID3v2.4.0 section 4.]\n // if the value is followed by a string termination, all the following information\n // should be ignored [ID3v2.4.0 section 4.3]\n\n frame.url = parseIso88591$1(frame.data, descriptionEndIndex + 1, frame.data.length).replace(/\\0.*$/, '');\n },\n 'PRIV': function (frame) {\n var i;\n for (i = 0; i < frame.data.length; i++) {\n if (frame.data[i] === 0) {\n // parse the owner field (terminated with \\0)\n frame.owner = parseIso88591$1(frame.data, 0, i);\n break;\n }\n }\n frame.privateData = frame.data.subarray(i + 1);\n frame.data = frame.privateData;\n }\n };\n var parseId3Frames$1 = function (data) {\n var frameSize,\n frameHeader,\n frameStart = 10,\n tagSize = 0,\n frames = []; // If we don't have enough data for a header (10 bytes), or the\n // first 3 bytes are not 'ID3', this is not a valid ID3 tag.\n\n if (data.length < 10 || data[0] !== 'I'.charCodeAt(0) || data[1] !== 'D'.charCodeAt(0) || data[2] !== '3'.charCodeAt(0)) {\n return;\n } // the tag size is transmitted as a 28-bit integer in the\n // last four bytes of the ID3 header.\n // The most significant bit of each byte is dropped and the\n // results concatenated to recover the actual value.\n\n tagSize = parseSyncSafeInteger$1(data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more\n // convenient for our comparisons to include it\n\n tagSize += 10; // check bit 6 of byte 5 for the extended header flag.\n\n var hasExtendedHeader = data[5] & 0x40;\n if (hasExtendedHeader) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += parseSyncSafeInteger$1(data.subarray(10, 14));\n tagSize -= parseSyncSafeInteger$1(data.subarray(16, 20)); // clip any padding off the end\n } // parse one or more ID3 frames\n // http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n do {\n // determine the number of bytes in this frame\n frameSize = parseSyncSafeInteger$1(data.subarray(frameStart + 4, frameStart + 8));\n if (frameSize < 1) {\n break;\n }\n frameHeader = String.fromCharCode(data[frameStart], data[frameStart + 1], data[frameStart + 2], data[frameStart + 3]);\n var frame = {\n id: frameHeader,\n data: data.subarray(frameStart + 10, frameStart + frameSize + 10)\n };\n frame.key = frame.id; // parse frame values\n\n if (frameParsers[frame.id]) {\n // use frame specific parser\n frameParsers[frame.id](frame);\n } else if (frame.id[0] === 'T') {\n // use text frame generic parser\n frameParsers['T*'](frame);\n } else if (frame.id[0] === 'W') {\n // use URL link frame generic parser\n frameParsers['W*'](frame);\n }\n frames.push(frame);\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < tagSize);\n return frames;\n };\n var parseId3 = {\n parseId3Frames: parseId3Frames$1,\n parseSyncSafeInteger: parseSyncSafeInteger$1,\n frameParsers: frameParsers\n };\n /**\n * mux.js\n *\n * 
Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Accepts program elementary stream (PES) data events and parses out\n * ID3 metadata from them, if present.\n * @see http://id3.org/id3v2.3.0\n */\n\n var Stream$5 = stream,\n StreamTypes$3 = streamTypes,\n id3 = parseId3,\n MetadataStream;\n MetadataStream = function (options) {\n var settings = {\n // the bytes of the program-level descriptor field in MP2T\n // see ISO/IEC 13818-1:2013 (E), section 2.6 \"Program and\n // program element descriptors\"\n descriptor: options && options.descriptor\n },\n // the total size in bytes of the ID3 tag being parsed\n tagSize = 0,\n // tag data that is not complete enough to be parsed\n buffer = [],\n // the total number of bytes currently in the buffer\n bufferSize = 0,\n i;\n MetadataStream.prototype.init.call(this); // calculate the text track in-band metadata track dispatch type\n // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track\n\n this.dispatchType = StreamTypes$3.METADATA_STREAM_TYPE.toString(16);\n if (settings.descriptor) {\n for (i = 0; i < settings.descriptor.length; i++) {\n this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);\n }\n }\n this.push = function (chunk) {\n var tag, frameStart, frameSize, frame, i, frameHeader;\n if (chunk.type !== 'timed-metadata') {\n return;\n } // if data_alignment_indicator is set in the PES header,\n // we must have the start of a new ID3 tag. Assume anything\n // remaining in the buffer was malformed and throw it out\n\n if (chunk.dataAlignmentIndicator) {\n bufferSize = 0;\n buffer.length = 0;\n } // ignore events that don't look like ID3 data\n\n if (buffer.length === 0 && (chunk.data.length < 10 || chunk.data[0] !== 'I'.charCodeAt(0) || chunk.data[1] !== 'D'.charCodeAt(0) || chunk.data[2] !== '3'.charCodeAt(0))) {\n this.trigger('log', {\n level: 'warn',\n message: 'Skipping unrecognized metadata packet'\n });\n return;\n } // add this chunk to the data we've collected so far\n\n buffer.push(chunk);\n bufferSize += chunk.data.byteLength; // grab the size of the entire frame from the ID3 header\n\n if (buffer.length === 1) {\n // the frame size is transmitted as a 28-bit integer in the\n // last four bytes of the ID3 header.\n // The most significant bit of each byte is dropped and the\n // results concatenated to recover the actual value.\n tagSize = id3.parseSyncSafeInteger(chunk.data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more\n // convenient for our comparisons to include it\n\n tagSize += 10;\n } // if the entire frame has not arrived, wait for more data\n\n if (bufferSize < tagSize) {\n return;\n } // collect the entire frame so it can be parsed\n\n tag = {\n data: new Uint8Array(tagSize),\n frames: [],\n pts: buffer[0].pts,\n dts: buffer[0].dts\n };\n for (i = 0; i < tagSize;) {\n tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);\n i += buffer[0].data.byteLength;\n bufferSize -= buffer[0].data.byteLength;\n buffer.shift();\n } // find the start of the first frame and the end of the tag\n\n frameStart = 10;\n if (tag.data[5] & 0x40) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += id3.parseSyncSafeInteger(tag.data.subarray(10, 14)); // clip any padding off the end\n\n tagSize -= id3.parseSyncSafeInteger(tag.data.subarray(16, 20));\n } // parse one or more ID3 frames\n // 
http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n do {\n // determine the number of bytes in this frame\n frameSize = id3.parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));\n if (frameSize < 1) {\n this.trigger('log', {\n level: 'warn',\n message: 'Malformed ID3 frame encountered. Skipping remaining metadata parsing.'\n }); // If the frame is malformed, don't parse any further frames but allow previous valid parsed frames\n // to be sent along.\n\n break;\n }\n frameHeader = String.fromCharCode(tag.data[frameStart], tag.data[frameStart + 1], tag.data[frameStart + 2], tag.data[frameStart + 3]);\n frame = {\n id: frameHeader,\n data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)\n };\n frame.key = frame.id; // parse frame values\n\n if (id3.frameParsers[frame.id]) {\n // use frame specific parser\n id3.frameParsers[frame.id](frame);\n } else if (frame.id[0] === 'T') {\n // use text frame generic parser\n id3.frameParsers['T*'](frame);\n } else if (frame.id[0] === 'W') {\n // use URL link frame generic parser\n id3.frameParsers['W*'](frame);\n } // handle the special PRIV frame used to indicate the start\n // time for raw AAC data\n\n if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {\n var d = frame.data,\n size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;\n size *= 4;\n size += d[7] & 0x03;\n frame.timeStamp = size; // in raw AAC, all subsequent data will be timestamped based\n // on the value of this frame\n // we couldn't have known the appropriate pts and dts before\n // parsing this ID3 tag so set those values now\n\n if (tag.pts === undefined && tag.dts === undefined) {\n tag.pts = frame.timeStamp;\n tag.dts = frame.timeStamp;\n }\n this.trigger('timestamp', frame);\n }\n tag.frames.push(frame);\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < tagSize);\n this.trigger('data', tag);\n };\n };\n MetadataStream.prototype = new Stream$5();\n var metadataStream = MetadataStream;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A stream-based mp2t to mp4 converter. 
This utility can be used to\n * deliver mp4s to a SourceBuffer on platforms that support native\n * Media Source Extensions.\n */\n\n var Stream$4 = stream,\n CaptionStream$1 = captionStream,\n StreamTypes$2 = streamTypes,\n TimestampRolloverStream = timestampRolloverStream.TimestampRolloverStream; // object types\n\n var TransportPacketStream, TransportParseStream, ElementaryStream; // constants\n\n var MP2T_PACKET_LENGTH$1 = 188,\n // bytes\n SYNC_BYTE$1 = 0x47;\n /**\n * Splits an incoming stream of binary data into MPEG-2 Transport\n * Stream packets.\n */\n\n TransportPacketStream = function () {\n var buffer = new Uint8Array(MP2T_PACKET_LENGTH$1),\n bytesInBuffer = 0;\n TransportPacketStream.prototype.init.call(this); // Deliver new bytes to the stream.\n\n /**\n * Split a stream of data into M2TS packets\n **/\n\n this.push = function (bytes) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH$1,\n everything; // If there are bytes remaining from the last segment, prepend them to the\n // bytes that were pushed in\n\n if (bytesInBuffer) {\n everything = new Uint8Array(bytes.byteLength + bytesInBuffer);\n everything.set(buffer.subarray(0, bytesInBuffer));\n everything.set(bytes, bytesInBuffer);\n bytesInBuffer = 0;\n } else {\n everything = bytes;\n } // While we have enough data for a packet\n\n while (endIndex < everything.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (everything[startIndex] === SYNC_BYTE$1 && everything[endIndex] === SYNC_BYTE$1) {\n // We found a packet so emit it and jump one whole packet forward in\n // the stream\n this.trigger('data', everything.subarray(startIndex, endIndex));\n startIndex += MP2T_PACKET_LENGTH$1;\n endIndex += MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n } // If there was some data left over at the end of the segment that couldn't\n // possibly be a whole packet, keep it because it might be the start of a packet\n // that continues in the next segment\n\n if (startIndex < everything.byteLength) {\n buffer.set(everything.subarray(startIndex), 0);\n bytesInBuffer = everything.byteLength - startIndex;\n }\n };\n /**\n * Passes identified M2TS packets to the TransportParseStream to be parsed\n **/\n\n this.flush = function () {\n // If the buffer contains a whole packet when we are being flushed, emit it\n // and empty the buffer. 
Otherwise hold onto the data because it may be\n // important for decoding the next segment\n if (bytesInBuffer === MP2T_PACKET_LENGTH$1 && buffer[0] === SYNC_BYTE$1) {\n this.trigger('data', buffer);\n bytesInBuffer = 0;\n }\n this.trigger('done');\n };\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n this.reset = function () {\n bytesInBuffer = 0;\n this.trigger('reset');\n };\n };\n TransportPacketStream.prototype = new Stream$4();\n /**\n * Accepts an MP2T TransportPacketStream and emits data events with parsed\n * forms of the individual transport stream packets.\n */\n\n TransportParseStream = function () {\n var parsePsi, parsePat, parsePmt, self;\n TransportParseStream.prototype.init.call(this);\n self = this;\n this.packetsWaitingForPmt = [];\n this.programMapTable = undefined;\n parsePsi = function (payload, psi) {\n var offset = 0; // PSI packets may be split into multiple sections and those\n // sections may be split into multiple packets. If a PSI\n // section starts in this packet, the payload_unit_start_indicator\n // will be true and the first byte of the payload will indicate\n // the offset from the current position to the start of the\n // section.\n\n if (psi.payloadUnitStartIndicator) {\n offset += payload[offset] + 1;\n }\n if (psi.type === 'pat') {\n parsePat(payload.subarray(offset), psi);\n } else {\n parsePmt(payload.subarray(offset), psi);\n }\n };\n parsePat = function (payload, pat) {\n pat.section_number = payload[7]; // eslint-disable-line camelcase\n\n pat.last_section_number = payload[8]; // eslint-disable-line camelcase\n // skip the PSI header and parse the first PMT entry\n\n self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11];\n pat.pmtPid = self.pmtPid;\n };\n /**\n * Parse out the relevant fields of a Program Map Table (PMT).\n * @param payload {Uint8Array} the PMT-specific portion of an MP2T\n * packet. The first byte in this array should be the table_id\n * field.\n * @param pmt {object} the object that should be decorated with\n * fields parsed from the PMT.\n */\n\n parsePmt = function (payload, pmt) {\n var sectionLength, tableEnd, programInfoLength, offset; // PMTs can be sent ahead of the time when they should actually\n // take effect. We don't believe this should ever be the case\n // for HLS but we'll ignore \"forward\" PMT declarations if we see\n // them. Future PMT declarations have the current_next_indicator\n // set to zero.\n\n if (!(payload[5] & 0x01)) {\n return;\n } // overwrite any existing program map table\n\n self.programMapTable = {\n video: null,\n audio: null,\n 'timed-metadata': {}\n }; // the mapping table ends at the end of the current section\n\n sectionLength = (payload[1] & 0x0f) << 8 | payload[2];\n tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n\n programInfoLength = (payload[10] & 0x0f) << 8 | payload[11]; // advance the offset to the first entry in the mapping table\n\n offset = 12 + programInfoLength;\n while (offset < tableEnd) {\n var streamType = payload[offset];\n var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2]; // only map a single elementary_pid for audio and video stream types\n // TODO: should this be done for metadata too? 
for now maintain behavior of\n // multiple metadata streams\n\n if (streamType === StreamTypes$2.H264_STREAM_TYPE && self.programMapTable.video === null) {\n self.programMapTable.video = pid;\n } else if (streamType === StreamTypes$2.ADTS_STREAM_TYPE && self.programMapTable.audio === null) {\n self.programMapTable.audio = pid;\n } else if (streamType === StreamTypes$2.METADATA_STREAM_TYPE) {\n // map pid to stream type for metadata streams\n self.programMapTable['timed-metadata'][pid] = streamType;\n } // move to the next table entry\n // skip past the elementary stream descriptors, if present\n\n offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5;\n } // record the map on the packet as well\n\n pmt.programMapTable = self.programMapTable;\n };\n /**\n * Deliver a new MP2T packet to the next stream in the pipeline.\n */\n\n this.push = function (packet) {\n var result = {},\n offset = 4;\n result.payloadUnitStartIndicator = !!(packet[1] & 0x40); // pid is a 13-bit field starting at the last bit of packet[1]\n\n result.pid = packet[1] & 0x1f;\n result.pid <<= 8;\n result.pid |= packet[2]; // if an adaptation field is present, its length is specified by the\n // fifth byte of the TS packet header. The adaptation field is\n // used to add stuffing to PES packets that don't fill a complete\n // TS packet, and to specify some forms of timing and control data\n // that we do not currently use.\n\n if ((packet[3] & 0x30) >>> 4 > 0x01) {\n offset += packet[offset] + 1;\n } // parse the rest of the packet based on the type\n\n if (result.pid === 0) {\n result.type = 'pat';\n parsePsi(packet.subarray(offset), result);\n this.trigger('data', result);\n } else if (result.pid === this.pmtPid) {\n result.type = 'pmt';\n parsePsi(packet.subarray(offset), result);\n this.trigger('data', result); // if there are any packets waiting for a PMT to be found, process them now\n\n while (this.packetsWaitingForPmt.length) {\n this.processPes_.apply(this, this.packetsWaitingForPmt.shift());\n }\n } else if (this.programMapTable === undefined) {\n // When we have not seen a PMT yet, defer further processing of\n // PES packets until one has been parsed\n this.packetsWaitingForPmt.push([packet, offset, result]);\n } else {\n this.processPes_(packet, offset, result);\n }\n };\n this.processPes_ = function (packet, offset, result) {\n // set the appropriate stream type\n if (result.pid === this.programMapTable.video) {\n result.streamType = StreamTypes$2.H264_STREAM_TYPE;\n } else if (result.pid === this.programMapTable.audio) {\n result.streamType = StreamTypes$2.ADTS_STREAM_TYPE;\n } else {\n // if not video or audio, it is timed-metadata or unknown\n // if unknown, streamType will be undefined\n result.streamType = this.programMapTable['timed-metadata'][result.pid];\n }\n result.type = 'pes';\n result.data = packet.subarray(offset);\n this.trigger('data', result);\n };\n };\n TransportParseStream.prototype = new Stream$4();\n TransportParseStream.STREAM_TYPES = {\n h264: 0x1b,\n adts: 0x0f\n };\n /**\n * Reconstitutes program elementary stream (PES) packets from parsed\n * transport stream packets. 
That is, if you pipe an\n * mp2t.TransportParseStream into a mp2t.ElementaryStream, the output\n * events will capture the bytes for individual PES\n * packets plus relevant metadata that has been extracted from the\n * container.\n */\n\n ElementaryStream = function () {\n var self = this,\n segmentHadPmt = false,\n // PES packet fragments\n video = {\n data: [],\n size: 0\n },\n audio = {\n data: [],\n size: 0\n },\n timedMetadata = {\n data: [],\n size: 0\n },\n programMapTable,\n parsePes = function (payload, pes) {\n var ptsDtsFlags;\n const startPrefix = payload[0] << 16 | payload[1] << 8 | payload[2]; // default to an empty array\n\n pes.data = new Uint8Array(); // In certain live streams, the start of a TS fragment has TS packets\n // whose frame data continues from the previous fragment. This check\n // ensures the PES data is the start of a new PES payload\n\n if (startPrefix !== 1) {\n return;\n } // get the packet length, this will be 0 for video\n\n pes.packetLength = 6 + (payload[4] << 8 | payload[5]); // find out if this packet starts a new keyframe\n\n pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0; // PES packets may be annotated with a PTS value, or a PTS value\n // and a DTS value. Determine what combination of values is\n // available to work with.\n\n ptsDtsFlags = payload[7]; // PTS and DTS are normally stored as a 33-bit number. JavaScript\n // performs all bitwise operations on 32-bit integers but\n // supports a much greater range (53 bits) of integer using standard\n // mathematical operations.\n // We construct a 31-bit value using bitwise operators over the 31\n // most significant bits and then multiply by 4 (equal to a left-shift\n // of 2) before we add the final 2 least significant bits of the\n // timestamp (equal to an OR.)\n\n if (ptsDtsFlags & 0xC0) {\n // the PTS and DTS are not written out directly. 
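Each 33-bit value is spread across five\n // header bytes, interleaved with marker bits, which is why the shifts below\n // reassemble it piece by piece. 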
For information\n // on how they are encoded, see\n // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n pes.pts = (payload[9] & 0x0E) << 27 | (payload[10] & 0xFF) << 20 | (payload[11] & 0xFE) << 12 | (payload[12] & 0xFF) << 5 | (payload[13] & 0xFE) >>> 3;\n pes.pts *= 4; // Left shift by 2\n\n pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs\n\n pes.dts = pes.pts;\n if (ptsDtsFlags & 0x40) {\n pes.dts = (payload[14] & 0x0E) << 27 | (payload[15] & 0xFF) << 20 | (payload[16] & 0xFE) << 12 | (payload[17] & 0xFF) << 5 | (payload[18] & 0xFE) >>> 3;\n pes.dts *= 4; // Left shift by 2\n\n pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs\n }\n } // the data section starts immediately after the PES header.\n // pes_header_data_length specifies the number of header bytes\n // that follow the last byte of the field.\n\n pes.data = payload.subarray(9 + payload[8]);\n },\n /**\n * Pass completely parsed PES packets to the next stream in the pipeline\n **/\n flushStream = function (stream, type, forceFlush) {\n var packetData = new Uint8Array(stream.size),\n event = {\n type: type\n },\n i = 0,\n offset = 0,\n packetFlushable = false,\n fragment; // do nothing if there is not enough buffered data for a complete\n // PES header\n\n if (!stream.data.length || stream.size < 9) {\n return;\n }\n event.trackId = stream.data[0].pid; // reassemble the packet\n\n for (i = 0; i < stream.data.length; i++) {\n fragment = stream.data[i];\n packetData.set(fragment.data, offset);\n offset += fragment.data.byteLength;\n } // parse assembled packet's PES header\n\n parsePes(packetData, event); // non-video PES packets MUST have a non-zero PES_packet_length\n // check that there is enough stream data to fill the packet\n\n packetFlushable = type === 'video' || event.packetLength <= stream.size; // flush pending packets if the conditions are right\n\n if (forceFlush || packetFlushable) {\n stream.size = 0;\n stream.data.length = 0;\n } // only emit packets that are complete. 
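(Video PES packets declare a\n // PES_packet_length of 0 and are treated as always flushable; other types\n // must have their full declared length buffered.) 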
this is to avoid assembling\n // incomplete PES packets due to poor segmentation\n\n if (packetFlushable) {\n self.trigger('data', event);\n }\n };\n ElementaryStream.prototype.init.call(this);\n /**\n * Identifies M2TS packet types and parses PES packets using metadata\n * parsed from the PMT\n **/\n\n this.push = function (data) {\n ({\n pat: function () {// we have to wait for the PMT to arrive as well before we\n // have any meaningful metadata\n },\n pes: function () {\n var stream, streamType;\n switch (data.streamType) {\n case StreamTypes$2.H264_STREAM_TYPE:\n stream = video;\n streamType = 'video';\n break;\n case StreamTypes$2.ADTS_STREAM_TYPE:\n stream = audio;\n streamType = 'audio';\n break;\n case StreamTypes$2.METADATA_STREAM_TYPE:\n stream = timedMetadata;\n streamType = 'timed-metadata';\n break;\n default:\n // ignore unknown stream types\n return;\n } // if a new packet is starting, we can flush the completed\n // packet\n\n if (data.payloadUnitStartIndicator) {\n flushStream(stream, streamType, true);\n } // buffer this fragment until we are sure we've received the\n // complete payload\n\n stream.data.push(data);\n stream.size += data.data.byteLength;\n },\n pmt: function () {\n var event = {\n type: 'metadata',\n tracks: []\n };\n programMapTable = data.programMapTable; // translate audio and video streams to tracks\n\n if (programMapTable.video !== null) {\n event.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.video,\n codec: 'avc',\n type: 'video'\n });\n }\n if (programMapTable.audio !== null) {\n event.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.audio,\n codec: 'adts',\n type: 'audio'\n });\n }\n segmentHadPmt = true;\n self.trigger('data', event);\n }\n })[data.type]();\n };\n this.reset = function () {\n video.size = 0;\n video.data.length = 0;\n audio.size = 0;\n audio.data.length = 0;\n this.trigger('reset');\n };\n /**\n * Flush any remaining input. Video PES packets may be of variable\n * length. Normally, the start of a new video packet can trigger the\n * finalization of the previous packet. That is not possible if no\n * more video is forthcoming, however. In that case, some other\n * mechanism (like the end of the file) has to be employed. When it is\n * clear that no additional data is forthcoming, calling this method\n * will flush the buffered packets.\n */\n\n this.flushStreams_ = function () {\n // !!THIS ORDER IS IMPORTANT!!\n // video first then audio\n flushStream(video, 'video');\n flushStream(audio, 'audio');\n flushStream(timedMetadata, 'timed-metadata');\n };\n this.flush = function () {\n // if on flush we haven't had a pmt emitted\n // and we have a pmt to emit. 
Emit the pmt\n // so that we trigger a trackinfo downstream.\n if (!segmentHadPmt && programMapTable) {\n var pmt = {\n type: 'metadata',\n tracks: []\n }; // translate audio and video streams to tracks\n\n if (programMapTable.video !== null) {\n pmt.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.video,\n codec: 'avc',\n type: 'video'\n });\n }\n if (programMapTable.audio !== null) {\n pmt.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.audio,\n codec: 'adts',\n type: 'audio'\n });\n }\n self.trigger('data', pmt);\n }\n segmentHadPmt = false;\n this.flushStreams_();\n this.trigger('done');\n };\n };\n ElementaryStream.prototype = new Stream$4();\n var m2ts$1 = {\n PAT_PID: 0x0000,\n MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH$1,\n TransportPacketStream: TransportPacketStream,\n TransportParseStream: TransportParseStream,\n ElementaryStream: ElementaryStream,\n TimestampRolloverStream: TimestampRolloverStream,\n CaptionStream: CaptionStream$1.CaptionStream,\n Cea608Stream: CaptionStream$1.Cea608Stream,\n Cea708Stream: CaptionStream$1.Cea708Stream,\n MetadataStream: metadataStream\n };\n for (var type in StreamTypes$2) {\n if (StreamTypes$2.hasOwnProperty(type)) {\n m2ts$1[type] = StreamTypes$2[type];\n }\n }\n var m2ts_1 = m2ts$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var Stream$3 = stream;\n var ONE_SECOND_IN_TS$2 = clock$2.ONE_SECOND_IN_TS;\n var AdtsStream$1;\n var ADTS_SAMPLING_FREQUENCIES$1 = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n /*\n * Accepts an ElementaryStream and emits data events with parsed\n * AAC Audio Frames of the individual packets. 
Input audio in ADTS\n format is unpacked and re-emitted as AAC frames.\n *\n * @see http://wiki.multimedia.cx/index.php?title=ADTS\n * @see http://wiki.multimedia.cx/?title=Understanding_AAC\n */\n\n AdtsStream$1 = function (handlePartialSegments) {\n var buffer,\n frameNum = 0;\n AdtsStream$1.prototype.init.call(this);\n this.skipWarn_ = function (start, end) {\n this.trigger('log', {\n level: 'warn',\n message: `adts skipping bytes ${start} to ${end} in frame ${frameNum} outside syncword`\n });\n };\n this.push = function (packet) {\n var i = 0,\n frameLength,\n protectionSkipBytes,\n oldBuffer,\n sampleCount,\n adtsFrameDuration;\n if (!handlePartialSegments) {\n frameNum = 0;\n }\n if (packet.type !== 'audio') {\n // ignore non-audio data\n return;\n } // Prepend any data in the buffer to the input data so that we can parse\n // aac frames that cross a PES packet boundary\n\n if (buffer && buffer.length) {\n oldBuffer = buffer;\n buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);\n buffer.set(oldBuffer);\n buffer.set(packet.data, oldBuffer.byteLength);\n } else {\n buffer = packet.data;\n } // unpack any ADTS frames which have been fully received\n // for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS\n\n var skip; // We use i + 7 here because we want to be able to parse the entire header.\n // If we don't have enough bytes to do that, then we definitely won't have a full frame.\n\n while (i + 7 < buffer.length) {\n // Look for the start of an ADTS header.\n if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {\n if (typeof skip !== 'number') {\n skip = i;\n } // If a valid header was not found, jump one forward and attempt to\n // find a valid ADTS header starting at the next byte\n\n i++;\n continue;\n }\n if (typeof skip === 'number') {\n this.skipWarn_(skip, i);\n skip = null;\n } // The protection skip bit tells us if we have 2 bytes of CRC data at the\n // end of the ADTS header\n\n protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2; // Frame length is a 13-bit integer starting 16 bits from the\n // end of the sync sequence\n // NOTE: frame length includes the size of the header\n\n frameLength = (buffer[i + 3] & 0x03) << 11 | buffer[i + 4] << 3 | (buffer[i + 5] & 0xe0) >> 5;\n sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;\n adtsFrameDuration = sampleCount * ONE_SECOND_IN_TS$2 / ADTS_SAMPLING_FREQUENCIES$1[(buffer[i + 2] & 0x3c) >>> 2]; // If we don't have enough data to actually finish this ADTS frame,\n // then we have to wait for more data\n\n if (buffer.byteLength - i < frameLength) {\n break;\n } // Otherwise, deliver the complete AAC frame\n\n this.trigger('data', {\n pts: packet.pts + frameNum * adtsFrameDuration,\n dts: packet.dts + frameNum * adtsFrameDuration,\n sampleCount: sampleCount,\n audioobjecttype: (buffer[i + 2] >>> 6 & 0x03) + 1,\n channelcount: (buffer[i + 2] & 1) << 2 | (buffer[i + 3] & 0xc0) >>> 6,\n samplerate: ADTS_SAMPLING_FREQUENCIES$1[(buffer[i + 2] & 0x3c) >>> 2],\n samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,\n // assume ISO/IEC 14496-12 AudioSampleEntry default of 16\n samplesize: 16,\n // data is the frame without its header\n data: buffer.subarray(i + 7 + protectionSkipBytes, i + frameLength)\n });\n frameNum++;\n i += frameLength;\n }\n if (typeof skip === 'number') {\n this.skipWarn_(skip, i);\n skip = null;\n } // remove processed bytes from the buffer.\n\n buffer = buffer.subarray(i);\n };\n this.flush = function () {\n frameNum = 0;\n this.trigger('done');\n };\n 
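// ---- Editor's sketch, not part of mux.js: the ADTS header math used in\n // push() above, pulled out as a standalone helper. The name\n // decodeAdtsHeaderFields is hypothetical and nothing in this file calls it;\n // it assumes header is a Uint8Array positioned at an ADTS syncword.\n var decodeAdtsHeaderFields = function (header) {\n return {\n // 13-bit frame length (includes the header): low 2 bits of byte 3,\n // all of byte 4, high 3 bits of byte 5\n frameLength: (header[3] & 0x03) << 11 | header[4] << 3 | (header[5] & 0xe0) >> 5,\n // (number_of_raw_data_blocks_in_frame + 1) * 1024 samples per frame\n sampleCount: ((header[6] & 0x03) + 1) * 1024,\n // 4-bit sampling_frequency_index in byte 2 indexes the table above\n samplerate: ADTS_SAMPLING_FREQUENCIES$1[(header[2] & 0x3c) >>> 2]\n };\n };\n 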
this.reset = function () {\n buffer = void 0;\n this.trigger('reset');\n };\n this.endTimeline = function () {\n buffer = void 0;\n this.trigger('endedtimeline');\n };\n };\n AdtsStream$1.prototype = new Stream$3();\n var adts = AdtsStream$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ExpGolomb$1;\n /**\n * Parser for exponential Golomb codes, a variable-bitwidth number encoding\n * scheme used by h264.\n */\n\n ExpGolomb$1 = function (workingData) {\n var\n // the number of bytes left to examine in workingData\n workingBytesAvailable = workingData.byteLength,\n // the current word being examined\n workingWord = 0,\n // :uint\n // the number of bits left to examine in the current word\n workingBitsAvailable = 0; // :uint;\n // ():uint\n\n this.length = function () {\n return 8 * workingBytesAvailable;\n }; // ():uint\n\n this.bitsAvailable = function () {\n return 8 * workingBytesAvailable + workingBitsAvailable;\n }; // ():void\n\n this.loadWord = function () {\n var position = workingData.byteLength - workingBytesAvailable,\n workingBytes = new Uint8Array(4),\n availableBytes = Math.min(4, workingBytesAvailable);\n if (availableBytes === 0) {\n throw new Error('no bytes available');\n }\n workingBytes.set(workingData.subarray(position, position + availableBytes));\n workingWord = new DataView(workingBytes.buffer).getUint32(0); // track the amount of workingData that has been processed\n\n workingBitsAvailable = availableBytes * 8;\n workingBytesAvailable -= availableBytes;\n }; // (count:int):void\n\n this.skipBits = function (count) {\n var skipBytes; // :int\n\n if (workingBitsAvailable > count) {\n workingWord <<= count;\n workingBitsAvailable -= count;\n } else {\n count -= workingBitsAvailable;\n skipBytes = Math.floor(count / 8);\n count -= skipBytes * 8;\n workingBytesAvailable -= skipBytes;\n this.loadWord();\n workingWord <<= count;\n workingBitsAvailable -= count;\n }\n }; // (size:int):uint\n\n this.readBits = function (size) {\n var bits = Math.min(workingBitsAvailable, size),\n // :uint\n valu = workingWord >>> 32 - bits; // :uint\n // if size > 31, handle error\n\n workingBitsAvailable -= bits;\n if (workingBitsAvailable > 0) {\n workingWord <<= bits;\n } else if (workingBytesAvailable > 0) {\n this.loadWord();\n }\n bits = size - bits;\n if (bits > 0) {\n return valu << bits | this.readBits(bits);\n }\n return valu;\n }; // ():uint\n\n this.skipLeadingZeros = function () {\n var leadingZeroCount; // :uint\n\n for (leadingZeroCount = 0; leadingZeroCount < workingBitsAvailable; ++leadingZeroCount) {\n if ((workingWord & 0x80000000 >>> leadingZeroCount) !== 0) {\n // the first bit of working word is 1\n workingWord <<= leadingZeroCount;\n workingBitsAvailable -= leadingZeroCount;\n return leadingZeroCount;\n }\n } // we exhausted workingWord and still have not found a 1\n\n this.loadWord();\n return leadingZeroCount + this.skipLeadingZeros();\n }; // ():void\n\n this.skipUnsignedExpGolomb = function () {\n this.skipBits(1 + this.skipLeadingZeros());\n }; // ():void\n\n this.skipExpGolomb = function () {\n this.skipBits(1 + this.skipLeadingZeros());\n }; // ():uint\n\n this.readUnsignedExpGolomb = function () {\n var clz = this.skipLeadingZeros(); // :uint\n\n return this.readBits(clz + 1) - 1;\n }; // ():int\n\n this.readExpGolomb = function () {\n var valu = this.readUnsignedExpGolomb(); // :int\n\n if (0x01 & valu) {\n // the number is odd if the low order bit is set\n return 1 
+ valu >>> 1; // add 1 to make it even, and divide by 2\n }\n return -1 * (valu >>> 1); // divide by two then make it negative\n }; // Some convenience functions\n // :Boolean\n\n this.readBoolean = function () {\n return this.readBits(1) === 1;\n }; // ():int\n\n this.readUnsignedByte = function () {\n return this.readBits(8);\n };\n this.loadWord();\n };\n var expGolomb = ExpGolomb$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var Stream$2 = stream;\n var ExpGolomb = expGolomb;\n var H264Stream$1, NalByteStream;\n var PROFILES_WITH_OPTIONAL_SPS_DATA;\n /**\n * Accepts a NAL unit byte stream and unpacks the embedded NAL units.\n */\n\n NalByteStream = function () {\n var syncPoint = 0,\n i,\n buffer;\n NalByteStream.prototype.init.call(this);\n /*\n * Scans a byte stream and triggers a data event with the NAL units found.\n * @param {Object} data Event received from H264Stream\n * @param {Uint8Array} data.data The h264 byte stream to be scanned\n *\n * @see H264Stream.push\n */\n\n this.push = function (data) {\n var swapBuffer;\n if (!buffer) {\n buffer = data.data;\n } else {\n swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);\n swapBuffer.set(buffer);\n swapBuffer.set(data.data, buffer.byteLength);\n buffer = swapBuffer;\n }\n var len = buffer.byteLength; // Rec. ITU-T H.264, Annex B\n // scan for NAL unit boundaries\n // a match looks like this:\n // 0 0 1 .. NAL .. 0 0 1\n // ^ sync point ^ i\n // or this:\n // 0 0 1 .. NAL .. 0 0 0\n // ^ sync point ^ i\n // advance the sync point to a NAL start, if necessary\n\n for (; syncPoint < len - 3; syncPoint++) {\n if (buffer[syncPoint + 2] === 1) {\n // the sync point is properly aligned\n i = syncPoint + 5;\n break;\n }\n }\n while (i < len) {\n // look at the current byte to determine if we've hit the end of\n // a NAL unit boundary\n switch (buffer[i]) {\n case 0:\n // skip past non-sync sequences\n if (buffer[i - 1] !== 0) {\n i += 2;\n break;\n } else if (buffer[i - 2] !== 0) {\n i++;\n break;\n } // deliver the NAL unit if it isn't empty\n\n if (syncPoint + 3 !== i - 2) {\n this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));\n } // drop trailing zeroes\n\n do {\n i++;\n } while (buffer[i] !== 1 && i < len);\n syncPoint = i - 2;\n i += 3;\n break;\n case 1:\n // skip past non-sync sequences\n if (buffer[i - 1] !== 0 || buffer[i - 2] !== 0) {\n i += 3;\n break;\n } // deliver the NAL unit\n\n this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));\n syncPoint = i - 2;\n i += 3;\n break;\n default:\n // the current byte isn't a one or zero, so it cannot be part\n // of a sync sequence\n i += 3;\n break;\n }\n } // filter out the NAL units that were delivered\n\n buffer = buffer.subarray(syncPoint);\n i -= syncPoint;\n syncPoint = 0;\n };\n this.reset = function () {\n buffer = null;\n syncPoint = 0;\n this.trigger('reset');\n };\n this.flush = function () {\n // deliver the last buffered NAL unit\n if (buffer && buffer.byteLength > 3) {\n this.trigger('data', buffer.subarray(syncPoint + 3));\n } // reset the stream state\n\n buffer = null;\n syncPoint = 0;\n this.trigger('done');\n };\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n };\n NalByteStream.prototype = new Stream$2(); // values of profile_idc that indicate additional fields are included in the SPS\n // see Recommendation ITU-T H.264 (4/2013),\n // 7.3.2.1.1 Sequence parameter set data syntax\n\n 
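// ---- Editor's sketch, not part of mux.js: a self-contained illustration of\n // the unsigned Exp-Golomb decoding that the SPS parser below relies on. The\n // name expGolombExample is hypothetical and nothing in this file calls it.\n // The byte 0x28 is 0b00101000: two leading zeros, a marker one, then bits 01,\n // so the decoder reads the 3-bit value 0b101 = 5 and returns 5 - 1 = 4.\n var expGolombExample = function () {\n var eg = new ExpGolomb(new Uint8Array([0x28]));\n return eg.readUnsignedExpGolomb(); // 4\n };\n // As the comment above notes, these are the profile_idc values that carry\n // the optional SPS fields:\n\n 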
PROFILES_WITH_OPTIONAL_SPS_DATA = {\n 100: true,\n 110: true,\n 122: true,\n 244: true,\n 44: true,\n 83: true,\n 86: true,\n 118: true,\n 128: true,\n // TODO: the three profiles below don't\n // appear to have sps data in the specification anymore?\n 138: true,\n 139: true,\n 134: true\n };\n /**\n * Accepts input from an ElementaryStream and produces H.264 NAL unit data\n * events.\n */\n\n H264Stream$1 = function () {\n var nalByteStream = new NalByteStream(),\n self,\n trackId,\n currentPts,\n currentDts,\n discardEmulationPreventionBytes,\n readSequenceParameterSet,\n skipScalingList;\n H264Stream$1.prototype.init.call(this);\n self = this;\n /*\n * Pushes a packet from a stream onto the NalByteStream\n *\n * @param {Object} packet - A packet received from a stream\n * @param {Uint8Array} packet.data - The raw bytes of the packet\n * @param {Number} packet.dts - Decode timestamp of the packet\n * @param {Number} packet.pts - Presentation timestamp of the packet\n * @param {Number} packet.trackId - The id of the h264 track this packet came from\n * @param {('video'|'audio')} packet.type - The type of packet\n *\n */\n\n this.push = function (packet) {\n if (packet.type !== 'video') {\n return;\n }\n trackId = packet.trackId;\n currentPts = packet.pts;\n currentDts = packet.dts;\n nalByteStream.push(packet);\n };\n /*\n * Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps\n * for the NALUs to the next stream component.\n * Also, preprocess caption and sequence parameter NALUs.\n *\n * @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push`\n * @see NalByteStream.push\n */\n\n nalByteStream.on('data', function (data) {\n var event = {\n trackId: trackId,\n pts: currentPts,\n dts: currentDts,\n data: data,\n nalUnitTypeCode: data[0] & 0x1f\n };\n switch (event.nalUnitTypeCode) {\n case 0x05:\n event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';\n break;\n case 0x06:\n event.nalUnitType = 'sei_rbsp';\n event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));\n break;\n case 0x07:\n event.nalUnitType = 'seq_parameter_set_rbsp';\n event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));\n event.config = readSequenceParameterSet(event.escapedRBSP);\n break;\n case 0x08:\n event.nalUnitType = 'pic_parameter_set_rbsp';\n break;\n case 0x09:\n event.nalUnitType = 'access_unit_delimiter_rbsp';\n break;\n } // This triggers data on the H264Stream\n\n self.trigger('data', event);\n });\n nalByteStream.on('done', function () {\n self.trigger('done');\n });\n nalByteStream.on('partialdone', function () {\n self.trigger('partialdone');\n });\n nalByteStream.on('reset', function () {\n self.trigger('reset');\n });\n nalByteStream.on('endedtimeline', function () {\n self.trigger('endedtimeline');\n });\n this.flush = function () {\n nalByteStream.flush();\n };\n this.partialFlush = function () {\n nalByteStream.partialFlush();\n };\n this.reset = function () {\n nalByteStream.reset();\n };\n this.endTimeline = function () {\n nalByteStream.endTimeline();\n };\n /**\n * Advance the ExpGolomb decoder past a scaling list. 
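Each entry is delta-coded\n * (nextScale = (lastScale + deltaScale + 256) % 256), so the list has no\n * fixed bit width and must be walked entry by entry. 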
The scaling\n * list is optionally transmitted as part of a sequence parameter\n * set and is not relevant to transmuxing.\n * @param count {number} the number of entries in this scaling list\n * @param expGolombDecoder {object} an ExpGolomb pointed to the\n * start of a scaling list\n * @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1\n */\n\n skipScalingList = function (count, expGolombDecoder) {\n var lastScale = 8,\n nextScale = 8,\n j,\n deltaScale;\n for (j = 0; j < count; j++) {\n if (nextScale !== 0) {\n deltaScale = expGolombDecoder.readExpGolomb();\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? lastScale : nextScale;\n }\n };\n /**\n * Expunge any \"Emulation Prevention\" bytes from a \"Raw Byte\n * Sequence Payload\"\n * @param data {Uint8Array} the bytes of a RBSP from a NAL\n * unit\n * @return {Uint8Array} the RBSP without any Emulation\n * Prevention Bytes\n */\n\n discardEmulationPreventionBytes = function (data) {\n var length = data.byteLength,\n emulationPreventionBytesPositions = [],\n i = 1,\n newLength,\n newData; // Find all `Emulation Prevention Bytes`\n\n while (i < length - 2) {\n if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {\n emulationPreventionBytesPositions.push(i + 2);\n i += 2;\n } else {\n i++;\n }\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n if (emulationPreventionBytesPositions.length === 0) {\n return data;\n } // Create a new array to hold the NAL unit data\n\n newLength = length - emulationPreventionBytesPositions.length;\n newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === emulationPreventionBytesPositions[0]) {\n // Skip this byte\n sourceIndex++; // Remove this position index\n\n emulationPreventionBytesPositions.shift();\n }\n newData[i] = data[sourceIndex];\n }\n return newData;\n };\n /**\n * Read a sequence parameter set and return some interesting video\n * properties. 
A sequence parameter set is the H264 metadata that\n * describes the properties of upcoming video frames.\n * @param data {Uint8Array} the bytes of a sequence parameter set\n * @return {object} an object with configuration parsed from the\n * sequence parameter set, including the dimensions of the\n * associated video frames.\n */\n\n readSequenceParameterSet = function (data) {\n var frameCropLeftOffset = 0,\n frameCropRightOffset = 0,\n frameCropTopOffset = 0,\n frameCropBottomOffset = 0,\n expGolombDecoder,\n profileIdc,\n levelIdc,\n profileCompatibility,\n chromaFormatIdc,\n picOrderCntType,\n numRefFramesInPicOrderCntCycle,\n picWidthInMbsMinus1,\n picHeightInMapUnitsMinus1,\n frameMbsOnlyFlag,\n scalingListCount,\n sarRatio = [1, 1],\n aspectRatioIdc,\n i;\n expGolombDecoder = new ExpGolomb(data);\n profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc\n\n profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag\n\n levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8)\n\n expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id\n // some profiles have more optional data we don't need\n\n if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) {\n chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb();\n if (chromaFormatIdc === 3) {\n expGolombDecoder.skipBits(1); // separate_colour_plane_flag\n }\n expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8\n\n expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8\n\n expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag\n\n if (expGolombDecoder.readBoolean()) {\n // seq_scaling_matrix_present_flag\n scalingListCount = chromaFormatIdc !== 3 ? 8 : 12;\n for (i = 0; i < scalingListCount; i++) {\n if (expGolombDecoder.readBoolean()) {\n // seq_scaling_list_present_flag[ i ]\n if (i < 6) {\n skipScalingList(16, expGolombDecoder);\n } else {\n skipScalingList(64, expGolombDecoder);\n }\n }\n }\n }\n }\n expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4\n\n picOrderCntType = expGolombDecoder.readUnsignedExpGolomb();\n if (picOrderCntType === 0) {\n expGolombDecoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4\n } else if (picOrderCntType === 1) {\n expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag\n\n expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic\n\n expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field\n\n numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb();\n for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {\n expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ]\n }\n }\n expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames\n\n expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag\n\n picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb();\n picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb();\n frameMbsOnlyFlag = expGolombDecoder.readBits(1);\n if (frameMbsOnlyFlag === 0) {\n expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag\n }\n expGolombDecoder.skipBits(1); // direct_8x8_inference_flag\n\n if (expGolombDecoder.readBoolean()) {\n // frame_cropping_flag\n frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropBottomOffset = expGolombDecoder.readUnsignedExpGolomb();\n }\n if (expGolombDecoder.readBoolean()) 
{\n // vui_parameters_present_flag\n if (expGolombDecoder.readBoolean()) {\n // aspect_ratio_info_present_flag\n aspectRatioIdc = expGolombDecoder.readUnsignedByte();\n switch (aspectRatioIdc) {\n case 1:\n sarRatio = [1, 1];\n break;\n case 2:\n sarRatio = [12, 11];\n break;\n case 3:\n sarRatio = [10, 11];\n break;\n case 4:\n sarRatio = [16, 11];\n break;\n case 5:\n sarRatio = [40, 33];\n break;\n case 6:\n sarRatio = [24, 11];\n break;\n case 7:\n sarRatio = [20, 11];\n break;\n case 8:\n sarRatio = [32, 11];\n break;\n case 9:\n sarRatio = [80, 33];\n break;\n case 10:\n sarRatio = [18, 11];\n break;\n case 11:\n sarRatio = [15, 11];\n break;\n case 12:\n sarRatio = [64, 33];\n break;\n case 13:\n sarRatio = [160, 99];\n break;\n case 14:\n sarRatio = [4, 3];\n break;\n case 15:\n sarRatio = [3, 2];\n break;\n case 16:\n sarRatio = [2, 1];\n break;\n case 255:\n {\n sarRatio = [expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte(), expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte()];\n break;\n }\n }\n }\n }\n return {\n profileIdc: profileIdc,\n levelIdc: levelIdc,\n profileCompatibility: profileCompatibility,\n width: (picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2,\n height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - frameCropTopOffset * 2 - frameCropBottomOffset * 2,\n // sar is sample aspect ratio\n sarRatio: sarRatio\n };\n };\n };\n H264Stream$1.prototype = new Stream$2();\n var h264 = {\n H264Stream: H264Stream$1,\n NalByteStream: NalByteStream\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about AAC data.\n */\n\n var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n var parseId3TagSize = function (header, byteIndex) {\n var returnSize = header[byteIndex + 6] << 21 | header[byteIndex + 7] << 14 | header[byteIndex + 8] << 7 | header[byteIndex + 9],\n flags = header[byteIndex + 5],\n footerPresent = (flags & 16) >> 4; // if we get a negative returnSize clamp it to 0\n\n returnSize = returnSize >= 0 ? 
returnSize : 0;\n    if (footerPresent) {\n      return returnSize + 20;\n    }\n    return returnSize + 10;\n  };\n  var getId3Offset = function (data, offset) {\n    if (data.length - offset < 10 || data[offset] !== 'I'.charCodeAt(0) || data[offset + 1] !== 'D'.charCodeAt(0) || data[offset + 2] !== '3'.charCodeAt(0)) {\n      return offset;\n    }\n    offset += parseId3TagSize(data, offset);\n    return getId3Offset(data, offset);\n  }; // TODO: use vhs-utils\n\n  var isLikelyAacData$1 = function (data) {\n    var offset = getId3Offset(data, 0);\n    return data.length >= offset + 2 && (data[offset] & 0xFF) === 0xFF && (data[offset + 1] & 0xF0) === 0xF0 &&\n    // verify that the 2 layer bits are 0, aka this\n    // is not mp3 data but aac data.\n    (data[offset + 1] & 0x16) === 0x10;\n  };\n  var parseSyncSafeInteger = function (data) {\n    return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];\n  }; // return a percent-encoded representation of the specified byte range\n  // @see http://en.wikipedia.org/wiki/Percent-encoding\n\n  var percentEncode = function (bytes, start, end) {\n    var i,\n      result = '';\n    for (i = start; i < end; i++) {\n      result += '%' + ('00' + bytes[i].toString(16)).slice(-2);\n    }\n    return result;\n  }; // return the string representation of the specified byte range,\n  // interpreted as ISO-8859-1.\n\n  var parseIso88591 = function (bytes, start, end) {\n    return unescape(percentEncode(bytes, start, end)); // jshint ignore:line\n  };\n  var parseAdtsSize = function (header, byteIndex) {\n    // The 13-bit ADTS frame_length spans the low two bits of byte 3, all of\n    // byte 4 and the top three bits of byte 5; the mask must be applied\n    // before the shift or the two high bits never contribute.\n    var lowThree = (header[byteIndex + 5] & 0xE0) >> 5,\n      middle = header[byteIndex + 4] << 3,\n      highTwo = (header[byteIndex + 3] & 0x3) << 11;\n    return highTwo | middle | lowThree;\n  };\n  var parseType$4 = function (header, byteIndex) {\n    if (header[byteIndex] === 'I'.charCodeAt(0) && header[byteIndex + 1] === 'D'.charCodeAt(0) && header[byteIndex + 2] === '3'.charCodeAt(0)) {\n      return 'timed-metadata';\n    } else if ((header[byteIndex] & 0xff) === 0xff && (header[byteIndex + 1] & 0xf0) === 0xf0) {\n      // the mask is parenthesized: === binds more tightly than &, so the\n      // ADTS sync-byte test must be grouped explicitly\n      return 'audio';\n    }\n    return null;\n  };\n  var parseSampleRate = function (packet) {\n    var i = 0;\n    while (i + 5 < packet.length) {\n      if (packet[i] !== 0xFF || (packet[i + 1] & 0xF6) !== 0xF0) {\n        // If a valid header was not found, jump one forward and attempt to\n        // find a valid ADTS header starting at the next byte\n        i++;\n        continue;\n      }\n      return ADTS_SAMPLING_FREQUENCIES[(packet[i + 2] & 0x3c) >>> 2];\n    }\n    return null;\n  };\n  var parseAacTimestamp = function (packet) {\n    var frameStart, frameSize, frame, frameHeader; // find the start of the first frame and the end of the tag\n\n    frameStart = 10;\n    if (packet[5] & 0x40) {\n      // advance the frame start past the extended header\n      frameStart += 4; // header size field\n\n      frameStart += parseSyncSafeInteger(packet.subarray(10, 14));\n    } // parse one or more ID3 frames\n    // http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n    do {\n      // determine the number of bytes in this frame\n      frameSize = parseSyncSafeInteger(packet.subarray(frameStart + 4, frameStart + 8));\n      if (frameSize < 1) {\n        return null;\n      }\n      frameHeader = String.fromCharCode(packet[frameStart], packet[frameStart + 1], packet[frameStart + 2], packet[frameStart + 3]);\n      if (frameHeader === 'PRIV') {\n        frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);\n        for (var i = 0; i < frame.byteLength; i++) {\n          if (frame[i] === 0) {\n            var owner = parseIso88591(frame, 0, i);\n            if (owner === 'com.apple.streaming.transportStreamTimestamp') {\n              var d = frame.subarray(i + 1);\n              var size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | 
d[6] << 6 | d[7] >>> 2;\n              size *= 4;\n              size += d[7] & 0x03;\n              return size;\n            }\n            break;\n          }\n        }\n      }\n      frameStart += 10; // advance past the frame header\n\n      frameStart += frameSize; // advance past the frame body\n    } while (frameStart < packet.byteLength);\n    return null;\n  };\n  var utils = {\n    isLikelyAacData: isLikelyAacData$1,\n    parseId3TagSize: parseId3TagSize,\n    parseAdtsSize: parseAdtsSize,\n    parseType: parseType$4,\n    parseSampleRate: parseSampleRate,\n    parseAacTimestamp: parseAacTimestamp\n  };\n  /**\n   * mux.js\n   *\n   * Copyright (c) Brightcove\n   * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n   *\n   * A stream-based aac to mp4 converter. This utility can be used to\n   * deliver mp4s to a SourceBuffer on platforms that support native\n   * Media Source Extensions.\n   */\n\n  var Stream$1 = stream;\n  var aacUtils = utils; // Constants\n\n  var AacStream$1;\n  /**\n   * Splits an incoming stream of binary data into ADTS and ID3 Frames.\n   */\n\n  AacStream$1 = function () {\n    var everything = new Uint8Array(),\n      timeStamp = 0;\n    AacStream$1.prototype.init.call(this);\n    this.setTimestamp = function (timestamp) {\n      timeStamp = timestamp;\n    };\n    this.push = function (bytes) {\n      var frameSize = 0,\n        byteIndex = 0,\n        bytesLeft,\n        chunk,\n        packet,\n        tempLength,\n        leftover; // If there are bytes remaining from the last segment, prepend them to the\n      // bytes that were pushed in\n\n      if (everything.length) {\n        tempLength = everything.length;\n        leftover = everything; // keep a reference to the previous buffer; copying out of the\n        // freshly-allocated (all-zero) array would silently drop these bytes\n\n        everything = new Uint8Array(bytes.byteLength + tempLength);\n        everything.set(leftover.subarray(0, tempLength));\n        everything.set(bytes, tempLength);\n      } else {\n        everything = bytes;\n      }\n      while (everything.length - byteIndex >= 3) {\n        if (everything[byteIndex] === 'I'.charCodeAt(0) && everything[byteIndex + 1] === 'D'.charCodeAt(0) && everything[byteIndex + 2] === '3'.charCodeAt(0)) {\n          // Exit early because we don't have enough to parse\n          // the ID3 tag header\n          if (everything.length - byteIndex < 10) {\n            break;\n          } // check framesize\n\n          frameSize = aacUtils.parseId3TagSize(everything, byteIndex); // Exit early if we don't have enough in the buffer\n          // to emit a full packet\n          // Add to byteIndex to support multiple ID3 tags in sequence\n\n          if (byteIndex + frameSize > everything.length) {\n            break;\n          }\n          chunk = {\n            type: 'timed-metadata',\n            data: everything.subarray(byteIndex, byteIndex + frameSize)\n          };\n          this.trigger('data', chunk);\n          byteIndex += frameSize;\n          continue;\n        } else if ((everything[byteIndex] & 0xff) === 0xff && (everything[byteIndex + 1] & 0xf0) === 0xf0) {\n          // Exit early because we don't have enough to parse\n          // the ADTS frame header\n          if (everything.length - byteIndex < 7) {\n            break;\n          }\n          frameSize = aacUtils.parseAdtsSize(everything, byteIndex); // Exit early if we don't have enough in the buffer\n          // to emit a full packet\n\n          if (byteIndex + frameSize > everything.length) {\n            break;\n          }\n          packet = {\n            type: 'audio',\n            data: everything.subarray(byteIndex, byteIndex + frameSize),\n            pts: timeStamp,\n            dts: timeStamp\n          };\n          this.trigger('data', packet);\n          byteIndex += frameSize;\n          continue;\n        }\n        byteIndex++;\n      }\n      bytesLeft = everything.length - byteIndex;\n      if (bytesLeft > 0) {\n        everything = everything.subarray(byteIndex);\n      } else {\n        everything = new Uint8Array();\n      }\n    };\n    this.reset = function () {\n      everything = new Uint8Array();\n      this.trigger('reset');\n    };\n    this.endTimeline = function () {\n      everything = new Uint8Array();\n      this.trigger('endedtimeline');\n    };\n  };\n  AacStream$1.prototype = new Stream$1();\n  var aac = 
AacStream$1;\n var AUDIO_PROPERTIES$1 = ['audioobjecttype', 'channelcount', 'samplerate', 'samplingfrequencyindex', 'samplesize'];\n var audioProperties = AUDIO_PROPERTIES$1;\n var VIDEO_PROPERTIES$1 = ['width', 'height', 'profileIdc', 'levelIdc', 'profileCompatibility', 'sarRatio'];\n var videoProperties = VIDEO_PROPERTIES$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A stream-based mp2t to mp4 converter. This utility can be used to\n * deliver mp4s to a SourceBuffer on platforms that support native\n * Media Source Extensions.\n */\n\n var Stream = stream;\n var mp4 = mp4Generator;\n var frameUtils = frameUtils$1;\n var audioFrameUtils = audioFrameUtils$1;\n var trackDecodeInfo = trackDecodeInfo$1;\n var m2ts = m2ts_1;\n var clock = clock$2;\n var AdtsStream = adts;\n var H264Stream = h264.H264Stream;\n var AacStream = aac;\n var isLikelyAacData = utils.isLikelyAacData;\n var ONE_SECOND_IN_TS$1 = clock$2.ONE_SECOND_IN_TS;\n var AUDIO_PROPERTIES = audioProperties;\n var VIDEO_PROPERTIES = videoProperties; // object types\n\n var VideoSegmentStream, AudioSegmentStream, Transmuxer, CoalesceStream;\n var retriggerForStream = function (key, event) {\n event.stream = key;\n this.trigger('log', event);\n };\n var addPipelineLogRetriggers = function (transmuxer, pipeline) {\n var keys = Object.keys(pipeline);\n for (var i = 0; i < keys.length; i++) {\n var key = keys[i]; // skip non-stream keys and headOfPipeline\n // which is just a duplicate\n\n if (key === 'headOfPipeline' || !pipeline[key].on) {\n continue;\n }\n pipeline[key].on('log', retriggerForStream.bind(transmuxer, key));\n }\n };\n /**\n * Compare two arrays (even typed) for same-ness\n */\n\n var arrayEquals = function (a, b) {\n var i;\n if (a.length !== b.length) {\n return false;\n } // compare the value of each element in the array\n\n for (i = 0; i < a.length; i++) {\n if (a[i] !== b[i]) {\n return false;\n }\n }\n return true;\n };\n var generateSegmentTimingInfo = function (baseMediaDecodeTime, startDts, startPts, endDts, endPts, prependedContentDuration) {\n var ptsOffsetFromDts = startPts - startDts,\n decodeDuration = endDts - startDts,\n presentationDuration = endPts - startPts; // The PTS and DTS values are based on the actual stream times from the segment,\n // however, the player time values will reflect a start from the baseMediaDecodeTime.\n // In order to provide relevant values for the player times, base timing info on the\n // baseMediaDecodeTime and the DTS and PTS durations of the segment.\n\n return {\n start: {\n dts: baseMediaDecodeTime,\n pts: baseMediaDecodeTime + ptsOffsetFromDts\n },\n end: {\n dts: baseMediaDecodeTime + decodeDuration,\n pts: baseMediaDecodeTime + presentationDuration\n },\n prependedContentDuration: prependedContentDuration,\n baseMediaDecodeTime: baseMediaDecodeTime\n };\n };\n /**\n * Constructs a single-track, ISO BMFF media segment from AAC data\n * events. 
The output of this stream can be fed to a SourceBuffer\n   * configured with a suitable initialization segment.\n   * @param track {object} track metadata configuration\n   * @param options {object} transmuxer options object\n   * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n   *        in the source; false to adjust the first segment to start at 0.\n   */\n\n  AudioSegmentStream = function (track, options) {\n    var adtsFrames = [],\n      sequenceNumber,\n      earliestAllowedDts = 0,\n      audioAppendStartTs = 0,\n      videoBaseMediaDecodeTime = Infinity;\n    options = options || {};\n    sequenceNumber = options.firstSequenceNumber || 0;\n    AudioSegmentStream.prototype.init.call(this);\n    this.push = function (data) {\n      trackDecodeInfo.collectDtsInfo(track, data);\n      if (track) {\n        AUDIO_PROPERTIES.forEach(function (prop) {\n          track[prop] = data[prop];\n        });\n      } // buffer audio data until end() is called\n\n      adtsFrames.push(data);\n    };\n    this.setEarliestDts = function (earliestDts) {\n      earliestAllowedDts = earliestDts;\n    };\n    this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n      videoBaseMediaDecodeTime = baseMediaDecodeTime;\n    };\n    this.setAudioAppendStart = function (timestamp) {\n      audioAppendStartTs = timestamp;\n    };\n    this.flush = function () {\n      var frames, moof, mdat, boxes, frameDuration, segmentDuration, videoClockCyclesOfSilencePrefixed; // return early if no audio data has been observed\n\n      if (adtsFrames.length === 0) {\n        this.trigger('done', 'AudioSegmentStream');\n        return;\n      }\n      frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);\n      track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps); // amount of audio filled, but the value is in video clock rather than audio clock\n\n      videoClockCyclesOfSilencePrefixed = audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to\n      // samples (that is, adts frames) in the audio data\n\n      track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to construct the mdat\n\n      mdat = mp4.mdat(audioFrameUtils.concatenateFrameData(frames));\n      adtsFrames = [];\n      moof = mp4.moof(sequenceNumber, [track]);\n      boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // bump the sequence number for next time\n\n      sequenceNumber++;\n      boxes.set(moof);\n      boxes.set(mdat, moof.byteLength);\n      trackDecodeInfo.clearDtsInfo(track);\n      frameDuration = Math.ceil(ONE_SECOND_IN_TS$1 * 1024 / track.samplerate); // TODO this check was added to maintain backwards compatibility (particularly with\n      // tests) on adding the timingInfo event. However, it seems unlikely that there's a\n      // valid use-case where an init segment/data should be triggered without associated\n      // frames. Leaving for now, but should be looked into.\n\n      if (frames.length) {\n        segmentDuration = frames.length * frameDuration;\n        this.trigger('segmentTimingInfo', generateSegmentTimingInfo(\n        // The audio track's baseMediaDecodeTime is in audio clock cycles, but the\n        // frame info is in video clock cycles. 
Convert to match expectation of\n        // listeners (that all timestamps will be based on video clock cycles).\n        clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate),\n        // frame times are already in video clock, as is segment duration\n        frames[0].dts, frames[0].pts, frames[0].dts + segmentDuration, frames[0].pts + segmentDuration, videoClockCyclesOfSilencePrefixed || 0));\n        this.trigger('timingInfo', {\n          start: frames[0].pts,\n          end: frames[0].pts + segmentDuration\n        });\n      }\n      this.trigger('data', {\n        track: track,\n        boxes: boxes\n      });\n      this.trigger('done', 'AudioSegmentStream');\n    };\n    this.reset = function () {\n      trackDecodeInfo.clearDtsInfo(track);\n      adtsFrames = [];\n      this.trigger('reset');\n    };\n  };\n  AudioSegmentStream.prototype = new Stream();\n  /**\n   * Constructs a single-track, ISO BMFF media segment from H264 data\n   * events. The output of this stream can be fed to a SourceBuffer\n   * configured with a suitable initialization segment.\n   * @param track {object} track metadata configuration\n   * @param options {object} transmuxer options object\n   * @param options.alignGopsAtEnd {boolean} If true, start from the end of the\n   *        gopsToAlignWith list when attempting to align gop pts\n   * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n   *        in the source; false to adjust the first segment to start at 0.\n   */\n\n  VideoSegmentStream = function (track, options) {\n    var sequenceNumber,\n      nalUnits = [],\n      gopsToAlignWith = [],\n      config,\n      pps;\n    options = options || {};\n    sequenceNumber = options.firstSequenceNumber || 0;\n    VideoSegmentStream.prototype.init.call(this);\n    delete track.minPTS;\n    this.gopCache_ = [];\n    /**\n     * Constructs an ISO BMFF segment given H264 nalUnits\n     * @param {Object} nalUnit A data event representing a nalUnit\n     * @param {String} nalUnit.nalUnitType\n     * @param {Object} nalUnit.config Properties for an mp4 track\n     * @param {Uint8Array} nalUnit.data The nalUnit bytes\n     * @see lib/codecs/h264.js\n     **/\n\n    this.push = function (nalUnit) {\n      trackDecodeInfo.collectDtsInfo(track, nalUnit); // record the track config\n\n      if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {\n        config = nalUnit.config;\n        track.sps = [nalUnit.data];\n        VIDEO_PROPERTIES.forEach(function (prop) {\n          track[prop] = config[prop];\n        }, this);\n      }\n      if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {\n        pps = nalUnit.data;\n        track.pps = [nalUnit.data];\n      } // buffer video until flush() is called\n\n      nalUnits.push(nalUnit);\n    };\n    /**\n     * Pass constructed ISO BMFF track and boxes on to the\n     * next stream in the pipeline\n     **/\n\n    this.flush = function () {\n      var frames,\n        gopForFusion,\n        gops,\n        moof,\n        mdat,\n        boxes,\n        prependedContentDuration = 0,\n        firstGop,\n        lastGop; // Throw away nalUnits at the start of the byte stream until\n      // we find the first AUD\n\n      while (nalUnits.length) {\n        if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {\n          break;\n        }\n        nalUnits.shift();\n      } // Return early if no video data has been observed\n\n      if (nalUnits.length === 0) {\n        this.resetStream_();\n        this.trigger('done', 'VideoSegmentStream');\n        return;\n      } // Organize the raw nal-units into arrays that represent\n      // higher-level constructs such as frames and gops\n      // (group-of-pictures)\n\n      frames = frameUtils.groupNalsIntoFrames(nalUnits);\n      gops = frameUtils.groupFramesIntoGops(frames); // If the first frame of this fragment is not a keyframe we have\n      // a problem since MSE (on Chrome) requires a leading keyframe.\n      //\n      // We have two 
approaches to repairing this situation:\n      // 1) GOP-FUSION:\n      //    This is where we keep track of the GOPS (group-of-pictures)\n      //    from previous fragments and attempt to find one that we can\n      //    prepend to the current fragment in order to create a valid\n      //    fragment.\n      // 2) KEYFRAME-PULLING:\n      //    Here we search for the first keyframe in the fragment and\n      //    throw away all the frames between the start of the fragment\n      //    and that keyframe. We then extend the duration and pull the\n      //    PTS of the keyframe forward so that it covers the time range\n      //    of the frames that were disposed of.\n      //\n      // #1 is far preferable to #2, which can cause \"stuttering\", but\n      // requires more things to be just right.\n\n      if (!gops[0][0].keyFrame) {\n        // Search for a gop for fusion from our gopCache\n        gopForFusion = this.getGopForFusion_(nalUnits[0], track);\n        if (gopForFusion) {\n          // in order to provide more accurate timing information about the segment, save\n          // the number of seconds prepended to the original segment due to GOP fusion\n          prependedContentDuration = gopForFusion.duration;\n          gops.unshift(gopForFusion); // Adjust Gops' metadata to account for the inclusion of the\n          // new gop at the beginning\n\n          gops.byteLength += gopForFusion.byteLength;\n          gops.nalCount += gopForFusion.nalCount;\n          gops.pts = gopForFusion.pts;\n          gops.dts = gopForFusion.dts;\n          gops.duration += gopForFusion.duration;\n        } else {\n          // If we didn't find a candidate gop fall back to keyframe-pulling\n          gops = frameUtils.extendFirstKeyFrame(gops);\n        }\n      } // Trim gops to align with gopsToAlignWith\n\n      if (gopsToAlignWith.length) {\n        var alignedGops;\n        if (options.alignGopsAtEnd) {\n          alignedGops = this.alignGopsAtEnd_(gops);\n        } else {\n          alignedGops = this.alignGopsAtStart_(gops);\n        }\n        if (!alignedGops) {\n          // save all the nals in the last GOP into the gop cache\n          this.gopCache_.unshift({\n            gop: gops.pop(),\n            pps: track.pps,\n            sps: track.sps\n          }); // Keep a maximum of 6 GOPs in the cache\n\n          this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits\n\n          nalUnits = []; // return early, no gops can be aligned with desired gopsToAlignWith\n\n          this.resetStream_();\n          this.trigger('done', 'VideoSegmentStream');\n          return;\n        } // Some gops were trimmed. 
clear dts info so minSegmentDts and pts are correct\n // when recalculated before sending off to CoalesceStream\n\n trackDecodeInfo.clearDtsInfo(track);\n gops = alignedGops;\n }\n trackDecodeInfo.collectDtsInfo(track, gops); // First, we have to build the index from byte locations to\n // samples (that is, frames) in the video data\n\n track.samples = frameUtils.generateSampleTable(gops); // Concatenate the video data and construct the mdat\n\n mdat = mp4.mdat(frameUtils.concatenateNalData(gops));\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);\n this.trigger('processedGopsInfo', gops.map(function (gop) {\n return {\n pts: gop.pts,\n dts: gop.dts,\n byteLength: gop.byteLength\n };\n }));\n firstGop = gops[0];\n lastGop = gops[gops.length - 1];\n this.trigger('segmentTimingInfo', generateSegmentTimingInfo(track.baseMediaDecodeTime, firstGop.dts, firstGop.pts, lastGop.dts + lastGop.duration, lastGop.pts + lastGop.duration, prependedContentDuration));\n this.trigger('timingInfo', {\n start: gops[0].pts,\n end: gops[gops.length - 1].pts + gops[gops.length - 1].duration\n }); // save all the nals in the last GOP into the gop cache\n\n this.gopCache_.unshift({\n gop: gops.pop(),\n pps: track.pps,\n sps: track.sps\n }); // Keep a maximum of 6 GOPs in the cache\n\n this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits\n\n nalUnits = [];\n this.trigger('baseMediaDecodeTime', track.baseMediaDecodeTime);\n this.trigger('timelineStartInfo', track.timelineStartInfo);\n moof = mp4.moof(sequenceNumber, [track]); // it would be great to allocate this array up front instead of\n // throwing away hundreds of media segment fragments\n\n boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // Bump the sequence number for next time\n\n sequenceNumber++;\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n this.trigger('data', {\n track: track,\n boxes: boxes\n });\n this.resetStream_(); // Continue with the flush process now\n\n this.trigger('done', 'VideoSegmentStream');\n };\n this.reset = function () {\n this.resetStream_();\n nalUnits = [];\n this.gopCache_.length = 0;\n gopsToAlignWith.length = 0;\n this.trigger('reset');\n };\n this.resetStream_ = function () {\n trackDecodeInfo.clearDtsInfo(track); // reset config and pps because they may differ across segments\n // for instance, when we are rendition switching\n\n config = undefined;\n pps = undefined;\n }; // Search for a candidate Gop for gop-fusion from the gop cache and\n // return it or return null if no good candidate was found\n\n this.getGopForFusion_ = function (nalUnit) {\n var halfSecond = 45000,\n // Half-a-second in a 90khz clock\n allowableOverlap = 10000,\n // About 3 frames @ 30fps\n nearestDistance = Infinity,\n dtsDistance,\n nearestGopObj,\n currentGop,\n currentGopObj,\n i; // Search for the GOP nearest to the beginning of this nal unit\n\n for (i = 0; i < this.gopCache_.length; i++) {\n currentGopObj = this.gopCache_[i];\n currentGop = currentGopObj.gop; // Reject Gops with different SPS or PPS\n\n if (!(track.pps && arrayEquals(track.pps[0], currentGopObj.pps[0])) || !(track.sps && arrayEquals(track.sps[0], currentGopObj.sps[0]))) {\n continue;\n } // Reject Gops that would require a negative baseMediaDecodeTime\n\n if (currentGop.dts < track.timelineStartInfo.dts) {\n continue;\n } // The distance between the end of the gop and the start of the nalUnit\n\n dtsDistance = nalUnit.dts - currentGop.dts - currentGop.duration; // 
Only consider GOPS that start before the nal unit and end within\n // a half-second of the nal unit\n\n if (dtsDistance >= -allowableOverlap && dtsDistance <= halfSecond) {\n // Always use the closest GOP we found if there is more than\n // one candidate\n if (!nearestGopObj || nearestDistance > dtsDistance) {\n nearestGopObj = currentGopObj;\n nearestDistance = dtsDistance;\n }\n }\n }\n if (nearestGopObj) {\n return nearestGopObj.gop;\n }\n return null;\n }; // trim gop list to the first gop found that has a matching pts with a gop in the list\n // of gopsToAlignWith starting from the START of the list\n\n this.alignGopsAtStart_ = function (gops) {\n var alignIndex, gopIndex, align, gop, byteLength, nalCount, duration, alignedGops;\n byteLength = gops.byteLength;\n nalCount = gops.nalCount;\n duration = gops.duration;\n alignIndex = gopIndex = 0;\n while (alignIndex < gopsToAlignWith.length && gopIndex < gops.length) {\n align = gopsToAlignWith[alignIndex];\n gop = gops[gopIndex];\n if (align.pts === gop.pts) {\n break;\n }\n if (gop.pts > align.pts) {\n // this current gop starts after the current gop we want to align on, so increment\n // align index\n alignIndex++;\n continue;\n } // current gop starts before the current gop we want to align on. so increment gop\n // index\n\n gopIndex++;\n byteLength -= gop.byteLength;\n nalCount -= gop.nalCount;\n duration -= gop.duration;\n }\n if (gopIndex === 0) {\n // no gops to trim\n return gops;\n }\n if (gopIndex === gops.length) {\n // all gops trimmed, skip appending all gops\n return null;\n }\n alignedGops = gops.slice(gopIndex);\n alignedGops.byteLength = byteLength;\n alignedGops.duration = duration;\n alignedGops.nalCount = nalCount;\n alignedGops.pts = alignedGops[0].pts;\n alignedGops.dts = alignedGops[0].dts;\n return alignedGops;\n }; // trim gop list to the first gop found that has a matching pts with a gop in the list\n // of gopsToAlignWith starting from the END of the list\n\n this.alignGopsAtEnd_ = function (gops) {\n var alignIndex, gopIndex, align, gop, alignEndIndex, matchFound;\n alignIndex = gopsToAlignWith.length - 1;\n gopIndex = gops.length - 1;\n alignEndIndex = null;\n matchFound = false;\n while (alignIndex >= 0 && gopIndex >= 0) {\n align = gopsToAlignWith[alignIndex];\n gop = gops[gopIndex];\n if (align.pts === gop.pts) {\n matchFound = true;\n break;\n }\n if (align.pts > gop.pts) {\n alignIndex--;\n continue;\n }\n if (alignIndex === gopsToAlignWith.length - 1) {\n // gop.pts is greater than the last alignment candidate. 
If no match is found\n // by the end of this loop, we still want to append gops that come after this\n // point\n alignEndIndex = gopIndex;\n }\n gopIndex--;\n }\n if (!matchFound && alignEndIndex === null) {\n return null;\n }\n var trimIndex;\n if (matchFound) {\n trimIndex = gopIndex;\n } else {\n trimIndex = alignEndIndex;\n }\n if (trimIndex === 0) {\n return gops;\n }\n var alignedGops = gops.slice(trimIndex);\n var metadata = alignedGops.reduce(function (total, gop) {\n total.byteLength += gop.byteLength;\n total.duration += gop.duration;\n total.nalCount += gop.nalCount;\n return total;\n }, {\n byteLength: 0,\n duration: 0,\n nalCount: 0\n });\n alignedGops.byteLength = metadata.byteLength;\n alignedGops.duration = metadata.duration;\n alignedGops.nalCount = metadata.nalCount;\n alignedGops.pts = alignedGops[0].pts;\n alignedGops.dts = alignedGops[0].dts;\n return alignedGops;\n };\n this.alignGopsWith = function (newGopsToAlignWith) {\n gopsToAlignWith = newGopsToAlignWith;\n };\n };\n VideoSegmentStream.prototype = new Stream();\n /**\n * A Stream that can combine multiple streams (ie. audio & video)\n * into a single output segment for MSE. Also supports audio-only\n * and video-only streams.\n * @param options {object} transmuxer options object\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at media timeline start.\n */\n\n CoalesceStream = function (options, metadataStream) {\n // Number of Tracks per output segment\n // If greater than 1, we combine multiple\n // tracks into a single segment\n this.numberOfTracks = 0;\n this.metadataStream = metadataStream;\n options = options || {};\n if (typeof options.remux !== 'undefined') {\n this.remuxTracks = !!options.remux;\n } else {\n this.remuxTracks = true;\n }\n if (typeof options.keepOriginalTimestamps === 'boolean') {\n this.keepOriginalTimestamps = options.keepOriginalTimestamps;\n } else {\n this.keepOriginalTimestamps = false;\n }\n this.pendingTracks = [];\n this.videoTrack = null;\n this.pendingBoxes = [];\n this.pendingCaptions = [];\n this.pendingMetadata = [];\n this.pendingBytes = 0;\n this.emittedTracks = 0;\n CoalesceStream.prototype.init.call(this); // Take output from multiple\n\n this.push = function (output) {\n // buffer incoming captions until the associated video segment\n // finishes\n if (output.content || output.text) {\n return this.pendingCaptions.push(output);\n } // buffer incoming id3 tags until the final flush\n\n if (output.frames) {\n return this.pendingMetadata.push(output);\n } // Add this track to the list of pending tracks and store\n // important information required for the construction of\n // the final segment\n\n this.pendingTracks.push(output.track);\n this.pendingBytes += output.boxes.byteLength; // TODO: is there an issue for this against chrome?\n // We unshift audio and push video because\n // as of Chrome 75 when switching from\n // one init segment to another if the video\n // mdat does not appear after the audio mdat\n // only audio will play for the duration of our transmux.\n\n if (output.track.type === 'video') {\n this.videoTrack = output.track;\n this.pendingBoxes.push(output.boxes);\n }\n if (output.track.type === 'audio') {\n this.audioTrack = output.track;\n this.pendingBoxes.unshift(output.boxes);\n }\n };\n };\n CoalesceStream.prototype = new Stream();\n CoalesceStream.prototype.flush = function (flushSource) {\n var offset = 0,\n event = {\n captions: [],\n captionStreams: 
{},\n      metadata: [],\n      info: {}\n    },\n    caption,\n    id3,\n    initSegment,\n    timelineStartPts = 0,\n    i;\n    if (this.pendingTracks.length < this.numberOfTracks) {\n      if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {\n        // Return because we haven't received a flush from a data-generating\n        // portion of the segment (meaning that we have only received meta-data\n        // or captions.)\n        return;\n      } else if (this.remuxTracks) {\n        // Return until we have enough tracks from the pipeline to remux (if we\n        // are remuxing audio and video into a single MP4)\n        return;\n      } else if (this.pendingTracks.length === 0) {\n        // In the case where we receive a flush without any data having been\n        // received we consider it an emitted track for the purposes of coalescing\n        // `done` events.\n        // We do this for the case where there is an audio and video track in the\n        // segment but no audio data. (seen in several playlists with alternate\n        // audio tracks and no audio present in the main TS segments.)\n        this.emittedTracks++;\n        if (this.emittedTracks >= this.numberOfTracks) {\n          this.trigger('done');\n          this.emittedTracks = 0;\n        }\n        return;\n      }\n    }\n    if (this.videoTrack) {\n      timelineStartPts = this.videoTrack.timelineStartInfo.pts;\n      VIDEO_PROPERTIES.forEach(function (prop) {\n        event.info[prop] = this.videoTrack[prop];\n      }, this);\n    } else if (this.audioTrack) {\n      timelineStartPts = this.audioTrack.timelineStartInfo.pts;\n      AUDIO_PROPERTIES.forEach(function (prop) {\n        event.info[prop] = this.audioTrack[prop];\n      }, this);\n    }\n    if (this.videoTrack || this.audioTrack) {\n      if (this.pendingTracks.length === 1) {\n        event.type = this.pendingTracks[0].type;\n      } else {\n        event.type = 'combined';\n      }\n      this.emittedTracks += this.pendingTracks.length;\n      initSegment = mp4.initSegment(this.pendingTracks); // Create a new typed array to hold the init segment\n\n      event.initSegment = new Uint8Array(initSegment.byteLength); // Create an init segment containing a moov\n      // and track definitions\n\n      event.initSegment.set(initSegment); // Create a new typed array to hold the moof+mdats\n\n      event.data = new Uint8Array(this.pendingBytes); // Append each moof+mdat (one per track) together\n\n      for (i = 0; i < this.pendingBoxes.length; i++) {\n        event.data.set(this.pendingBoxes[i], offset);\n        offset += this.pendingBoxes[i].byteLength;\n      } // Translate caption PTS times into second offsets to match the\n      // video timeline for the segment, and add track info\n\n      for (i = 0; i < this.pendingCaptions.length; i++) {\n        caption = this.pendingCaptions[i];\n        caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, this.keepOriginalTimestamps);\n        caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, this.keepOriginalTimestamps);\n        event.captionStreams[caption.stream] = true;\n        event.captions.push(caption);\n      } // Translate ID3 frame PTS times into second offsets to match the\n      // video timeline for the segment\n\n      for (i = 0; i < this.pendingMetadata.length; i++) {\n        id3 = this.pendingMetadata[i];\n        id3.cueTime = clock.metadataTsToSeconds(id3.pts, timelineStartPts, this.keepOriginalTimestamps);\n        event.metadata.push(id3);\n      } // We add this to every single emitted segment even though we only need\n      // it for the first\n\n      event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state\n\n      this.pendingTracks.length = 0;\n      this.videoTrack = null;\n      this.pendingBoxes.length = 0;\n      this.pendingCaptions.length = 0;\n      this.pendingBytes = 0;\n      
this.pendingMetadata.length = 0; // Emit the built segment\n // We include captions and ID3 tags for backwards compatibility,\n // ideally we should send only video and audio in the data event\n\n this.trigger('data', event); // Emit each caption to the outside world\n // Ideally, this would happen immediately on parsing captions,\n // but we need to ensure that video data is sent back first\n // so that caption timing can be adjusted to match video timing\n\n for (i = 0; i < event.captions.length; i++) {\n caption = event.captions[i];\n this.trigger('caption', caption);\n } // Emit each id3 tag to the outside world\n // Ideally, this would happen immediately on parsing the tag,\n // but we need to ensure that video data is sent back first\n // so that ID3 frame timing can be adjusted to match video timing\n\n for (i = 0; i < event.metadata.length; i++) {\n id3 = event.metadata[i];\n this.trigger('id3Frame', id3);\n }\n } // Only emit `done` if all tracks have been flushed and emitted\n\n if (this.emittedTracks >= this.numberOfTracks) {\n this.trigger('done');\n this.emittedTracks = 0;\n }\n };\n CoalesceStream.prototype.setRemux = function (val) {\n this.remuxTracks = val;\n };\n /**\n * A Stream that expects MP2T binary data as input and produces\n * corresponding media segments, suitable for use with Media Source\n * Extension (MSE) implementations that support the ISO BMFF byte\n * stream format, like Chrome.\n */\n\n Transmuxer = function (options) {\n var self = this,\n hasFlushed = true,\n videoTrack,\n audioTrack;\n Transmuxer.prototype.init.call(this);\n options = options || {};\n this.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;\n this.transmuxPipeline_ = {};\n this.setupAacPipeline = function () {\n var pipeline = {};\n this.transmuxPipeline_ = pipeline;\n pipeline.type = 'aac';\n pipeline.metadataStream = new m2ts.MetadataStream(); // set up the parsing pipeline\n\n pipeline.aacStream = new AacStream();\n pipeline.audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');\n pipeline.timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');\n pipeline.adtsStream = new AdtsStream();\n pipeline.coalesceStream = new CoalesceStream(options, pipeline.metadataStream);\n pipeline.headOfPipeline = pipeline.aacStream;\n pipeline.aacStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream);\n pipeline.aacStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream);\n pipeline.metadataStream.on('timestamp', function (frame) {\n pipeline.aacStream.setTimestamp(frame.timeStamp);\n });\n pipeline.aacStream.on('data', function (data) {\n if (data.type !== 'timed-metadata' && data.type !== 'audio' || pipeline.audioSegmentStream) {\n return;\n }\n audioTrack = audioTrack || {\n timelineStartInfo: {\n baseMediaDecodeTime: self.baseMediaDecodeTime\n },\n codec: 'adts',\n type: 'audio'\n }; // hook up the audio segment stream to the first track with aac data\n\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.audioSegmentStream = new AudioSegmentStream(audioTrack, options);\n pipeline.audioSegmentStream.on('log', self.getLogTrigger_('audioSegmentStream'));\n pipeline.audioSegmentStream.on('timingInfo', self.trigger.bind(self, 'audioTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream); // emit pmt info\n\n self.trigger('trackinfo', {\n hasAudio: !!audioTrack,\n 
hasVideo: !!videoTrack\n });\n }); // Re-emit any data coming from the coalesce stream to the outside world\n\n pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline\n\n pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));\n addPipelineLogRetriggers(this, pipeline);\n };\n this.setupTsPipeline = function () {\n var pipeline = {};\n this.transmuxPipeline_ = pipeline;\n pipeline.type = 'ts';\n pipeline.metadataStream = new m2ts.MetadataStream(); // set up the parsing pipeline\n\n pipeline.packetStream = new m2ts.TransportPacketStream();\n pipeline.parseStream = new m2ts.TransportParseStream();\n pipeline.elementaryStream = new m2ts.ElementaryStream();\n pipeline.timestampRolloverStream = new m2ts.TimestampRolloverStream();\n pipeline.adtsStream = new AdtsStream();\n pipeline.h264Stream = new H264Stream();\n pipeline.captionStream = new m2ts.CaptionStream(options);\n pipeline.coalesceStream = new CoalesceStream(options, pipeline.metadataStream);\n pipeline.headOfPipeline = pipeline.packetStream; // disassemble MPEG2-TS packets into elementary streams\n\n pipeline.packetStream.pipe(pipeline.parseStream).pipe(pipeline.elementaryStream).pipe(pipeline.timestampRolloverStream); // !!THIS ORDER IS IMPORTANT!!\n // demux the streams\n\n pipeline.timestampRolloverStream.pipe(pipeline.h264Stream);\n pipeline.timestampRolloverStream.pipe(pipeline.adtsStream);\n pipeline.timestampRolloverStream.pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream); // Hook up CEA-608/708 caption stream\n\n pipeline.h264Stream.pipe(pipeline.captionStream).pipe(pipeline.coalesceStream);\n pipeline.elementaryStream.on('data', function (data) {\n var i;\n if (data.type === 'metadata') {\n i = data.tracks.length; // scan the tracks listed in the metadata\n\n while (i--) {\n if (!videoTrack && data.tracks[i].type === 'video') {\n videoTrack = data.tracks[i];\n videoTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;\n } else if (!audioTrack && data.tracks[i].type === 'audio') {\n audioTrack = data.tracks[i];\n audioTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;\n }\n } // hook up the video segment stream to the first track with h264 data\n\n if (videoTrack && !pipeline.videoSegmentStream) {\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.videoSegmentStream = new VideoSegmentStream(videoTrack, options);\n pipeline.videoSegmentStream.on('log', self.getLogTrigger_('videoSegmentStream'));\n pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {\n // When video emits timelineStartInfo data after a flush, we forward that\n // info to the AudioSegmentStream, if it exists, because video timeline\n // data takes precedence. 
Do not do this if keepOriginalTimestamps is set,\n // because this is a particularly subtle form of timestamp alteration.\n if (audioTrack && !options.keepOriginalTimestamps) {\n audioTrack.timelineStartInfo = timelineStartInfo; // On the first segment we trim AAC frames that exist before the\n // very earliest DTS we have seen in video because Chrome will\n // interpret any video track with a baseMediaDecodeTime that is\n // non-zero as a gap.\n\n pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - self.baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('processedGopsInfo', self.trigger.bind(self, 'gopInfo'));\n pipeline.videoSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'videoSegmentTimingInfo'));\n pipeline.videoSegmentStream.on('baseMediaDecodeTime', function (baseMediaDecodeTime) {\n if (audioTrack) {\n pipeline.audioSegmentStream.setVideoBaseMediaDecodeTime(baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('timingInfo', self.trigger.bind(self, 'videoTimingInfo')); // Set up the final part of the video pipeline\n\n pipeline.h264Stream.pipe(pipeline.videoSegmentStream).pipe(pipeline.coalesceStream);\n }\n if (audioTrack && !pipeline.audioSegmentStream) {\n // hook up the audio segment stream to the first track with aac data\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.audioSegmentStream = new AudioSegmentStream(audioTrack, options);\n pipeline.audioSegmentStream.on('log', self.getLogTrigger_('audioSegmentStream'));\n pipeline.audioSegmentStream.on('timingInfo', self.trigger.bind(self, 'audioTimingInfo'));\n pipeline.audioSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'audioSegmentTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream);\n } // emit pmt info\n\n self.trigger('trackinfo', {\n hasAudio: !!audioTrack,\n hasVideo: !!videoTrack\n });\n }\n }); // Re-emit any data coming from the coalesce stream to the outside world\n\n pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data'));\n pipeline.coalesceStream.on('id3Frame', function (id3Frame) {\n id3Frame.dispatchType = pipeline.metadataStream.dispatchType;\n self.trigger('id3Frame', id3Frame);\n });\n pipeline.coalesceStream.on('caption', this.trigger.bind(this, 'caption')); // Let the consumer know we have finished flushing the entire pipeline\n\n pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));\n addPipelineLogRetriggers(this, pipeline);\n }; // hook up the segment streams once track metadata is delivered\n\n this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n var pipeline = this.transmuxPipeline_;\n if (!options.keepOriginalTimestamps) {\n this.baseMediaDecodeTime = baseMediaDecodeTime;\n }\n if (audioTrack) {\n audioTrack.timelineStartInfo.dts = undefined;\n audioTrack.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(audioTrack);\n if (pipeline.audioTimestampRolloverStream) {\n pipeline.audioTimestampRolloverStream.discontinuity();\n }\n }\n if (videoTrack) {\n if (pipeline.videoSegmentStream) {\n pipeline.videoSegmentStream.gopCache_ = [];\n }\n videoTrack.timelineStartInfo.dts = undefined;\n videoTrack.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(videoTrack);\n pipeline.captionStream.reset();\n }\n if (pipeline.timestampRolloverStream) {\n pipeline.timestampRolloverStream.discontinuity();\n }\n };\n this.setAudioAppendStart = function (timestamp) {\n if (audioTrack) {\n 
this.transmuxPipeline_.audioSegmentStream.setAudioAppendStart(timestamp);\n }\n };\n this.setRemux = function (val) {\n var pipeline = this.transmuxPipeline_;\n options.remux = val;\n if (pipeline && pipeline.coalesceStream) {\n pipeline.coalesceStream.setRemux(val);\n }\n };\n this.alignGopsWith = function (gopsToAlignWith) {\n if (videoTrack && this.transmuxPipeline_.videoSegmentStream) {\n this.transmuxPipeline_.videoSegmentStream.alignGopsWith(gopsToAlignWith);\n }\n };\n this.getLogTrigger_ = function (key) {\n var self = this;\n return function (event) {\n event.stream = key;\n self.trigger('log', event);\n };\n }; // feed incoming data to the front of the parsing pipeline\n\n this.push = function (data) {\n if (hasFlushed) {\n var isAac = isLikelyAacData(data);\n if (isAac && this.transmuxPipeline_.type !== 'aac') {\n this.setupAacPipeline();\n } else if (!isAac && this.transmuxPipeline_.type !== 'ts') {\n this.setupTsPipeline();\n }\n hasFlushed = false;\n }\n this.transmuxPipeline_.headOfPipeline.push(data);\n }; // flush any buffered data\n\n this.flush = function () {\n hasFlushed = true; // Start at the top of the pipeline and flush all pending work\n\n this.transmuxPipeline_.headOfPipeline.flush();\n };\n this.endTimeline = function () {\n this.transmuxPipeline_.headOfPipeline.endTimeline();\n };\n this.reset = function () {\n if (this.transmuxPipeline_.headOfPipeline) {\n this.transmuxPipeline_.headOfPipeline.reset();\n }\n }; // Caption data has to be reset when seeking outside buffered range\n\n this.resetCaptions = function () {\n if (this.transmuxPipeline_.captionStream) {\n this.transmuxPipeline_.captionStream.reset();\n }\n };\n };\n Transmuxer.prototype = new Stream();\n var transmuxer = {\n Transmuxer: Transmuxer,\n VideoSegmentStream: VideoSegmentStream,\n AudioSegmentStream: AudioSegmentStream,\n AUDIO_PROPERTIES: AUDIO_PROPERTIES,\n VIDEO_PROPERTIES: VIDEO_PROPERTIES,\n // exported for testing\n generateSegmentTimingInfo: generateSegmentTimingInfo\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var toUnsigned$3 = function (value) {\n return value >>> 0;\n };\n var toHexString$1 = function (value) {\n return ('00' + value.toString(16)).slice(-2);\n };\n var bin = {\n toUnsigned: toUnsigned$3,\n toHexString: toHexString$1\n };\n var parseType$3 = function (buffer) {\n var result = '';\n result += String.fromCharCode(buffer[0]);\n result += String.fromCharCode(buffer[1]);\n result += String.fromCharCode(buffer[2]);\n result += String.fromCharCode(buffer[3]);\n return result;\n };\n var parseType_1 = parseType$3;\n var toUnsigned$2 = bin.toUnsigned;\n var parseType$2 = parseType_1;\n var findBox$2 = function (data, path) {\n var results = [],\n i,\n size,\n type,\n end,\n subresults;\n if (!path.length) {\n // short-circuit the search for empty paths\n return null;\n }\n for (i = 0; i < data.byteLength;) {\n size = toUnsigned$2(data[i] << 24 | data[i + 1] << 16 | data[i + 2] << 8 | data[i + 3]);\n type = parseType$2(data.subarray(i + 4, i + 8));\n end = size > 1 ? 
i + size : data.byteLength;\n if (type === path[0]) {\n if (path.length === 1) {\n // this is the end of the path and we've found the box we were\n // looking for\n results.push(data.subarray(i + 8, end));\n } else {\n // recursively search for the next box along the path\n subresults = findBox$2(data.subarray(i + 8, end), path.slice(1));\n if (subresults.length) {\n results = results.concat(subresults);\n }\n }\n }\n i = end;\n } // we've finished searching all of data\n\n return results;\n };\n var findBox_1 = findBox$2;\n var toUnsigned$1 = bin.toUnsigned;\n var getUint64$2 = numbers.getUint64;\n var tfdt = function (data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4))\n };\n if (result.version === 1) {\n result.baseMediaDecodeTime = getUint64$2(data.subarray(4));\n } else {\n result.baseMediaDecodeTime = toUnsigned$1(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]);\n }\n return result;\n };\n var parseTfdt$2 = tfdt;\n var parseSampleFlags$1 = function (flags) {\n return {\n isLeading: (flags[0] & 0x0c) >>> 2,\n dependsOn: flags[0] & 0x03,\n isDependedOn: (flags[1] & 0xc0) >>> 6,\n hasRedundancy: (flags[1] & 0x30) >>> 4,\n paddingValue: (flags[1] & 0x0e) >>> 1,\n isNonSyncSample: flags[1] & 0x01,\n degradationPriority: flags[2] << 8 | flags[3]\n };\n };\n var parseSampleFlags_1 = parseSampleFlags$1;\n var parseSampleFlags = parseSampleFlags_1;\n var trun = function (data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n samples: []\n },\n view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n // Flag interpretation\n dataOffsetPresent = result.flags[2] & 0x01,\n // compare with 2nd byte of 0x1\n firstSampleFlagsPresent = result.flags[2] & 0x04,\n // compare with 2nd byte of 0x4\n sampleDurationPresent = result.flags[1] & 0x01,\n // compare with 2nd byte of 0x100\n sampleSizePresent = result.flags[1] & 0x02,\n // compare with 2nd byte of 0x200\n sampleFlagsPresent = result.flags[1] & 0x04,\n // compare with 2nd byte of 0x400\n sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08,\n // compare with 2nd byte of 0x800\n sampleCount = view.getUint32(4),\n offset = 8,\n sample;\n if (dataOffsetPresent) {\n // 32 bit signed integer\n result.dataOffset = view.getInt32(offset);\n offset += 4;\n } // Overrides the flags for the first sample only. 
The order of\n    // optional values will be: duration, size, compositionTimeOffset\n\n    if (firstSampleFlagsPresent && sampleCount) {\n      sample = {\n        flags: parseSampleFlags(data.subarray(offset, offset + 4))\n      };\n      offset += 4;\n      if (sampleDurationPresent) {\n        sample.duration = view.getUint32(offset);\n        offset += 4;\n      }\n      if (sampleSizePresent) {\n        sample.size = view.getUint32(offset);\n        offset += 4;\n      }\n      if (sampleCompositionTimeOffsetPresent) {\n        if (result.version === 1) {\n          sample.compositionTimeOffset = view.getInt32(offset);\n        } else {\n          sample.compositionTimeOffset = view.getUint32(offset);\n        }\n        offset += 4;\n      }\n      result.samples.push(sample);\n      sampleCount--;\n    }\n    while (sampleCount--) {\n      sample = {};\n      if (sampleDurationPresent) {\n        sample.duration = view.getUint32(offset);\n        offset += 4;\n      }\n      if (sampleSizePresent) {\n        sample.size = view.getUint32(offset);\n        offset += 4;\n      }\n      if (sampleFlagsPresent) {\n        sample.flags = parseSampleFlags(data.subarray(offset, offset + 4));\n        offset += 4;\n      }\n      if (sampleCompositionTimeOffsetPresent) {\n        if (result.version === 1) {\n          sample.compositionTimeOffset = view.getInt32(offset);\n        } else {\n          sample.compositionTimeOffset = view.getUint32(offset);\n        }\n        offset += 4;\n      }\n      result.samples.push(sample);\n    }\n    return result;\n  };\n  var parseTrun$2 = trun;\n  var tfhd = function (data) {\n    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n      result = {\n        version: data[0],\n        flags: new Uint8Array(data.subarray(1, 4)),\n        trackId: view.getUint32(4)\n      },\n      baseDataOffsetPresent = result.flags[2] & 0x01,\n      sampleDescriptionIndexPresent = result.flags[2] & 0x02,\n      defaultSampleDurationPresent = result.flags[2] & 0x08,\n      defaultSampleSizePresent = result.flags[2] & 0x10,\n      defaultSampleFlagsPresent = result.flags[2] & 0x20,\n      // tf_flags 0x010000 (duration-is-empty) and 0x020000 (default-base-is-moof)\n      // live in the most significant flags byte, so flags[0] must be tested\n      // against 0x01 and 0x02; masking a single byte with 0x010000 is always 0\n      durationIsEmpty = result.flags[0] & 0x01,\n      defaultBaseIsMoof = result.flags[0] & 0x02,\n      i;\n    i = 8;\n    if (baseDataOffsetPresent) {\n      i += 4; // truncate top 4 bytes\n      // FIXME: should we read the full 64 bits?\n\n      result.baseDataOffset = view.getUint32(12);\n      i += 4;\n    }\n    if (sampleDescriptionIndexPresent) {\n      result.sampleDescriptionIndex = view.getUint32(i);\n      i += 4;\n    }\n    if (defaultSampleDurationPresent) {\n      result.defaultSampleDuration = view.getUint32(i);\n      i += 4;\n    }\n    if (defaultSampleSizePresent) {\n      result.defaultSampleSize = view.getUint32(i);\n      i += 4;\n    }\n    if (defaultSampleFlagsPresent) {\n      result.defaultSampleFlags = view.getUint32(i);\n    }\n    if (durationIsEmpty) {\n      result.durationIsEmpty = true;\n    }\n    if (!baseDataOffsetPresent && defaultBaseIsMoof) {\n      result.baseDataOffsetIsMoof = true;\n    }\n    return result;\n  };\n  var parseTfhd$2 = tfhd;\n  var win;\n  if (typeof window !== \"undefined\") {\n    win = window;\n  } else if (typeof commonjsGlobal !== \"undefined\") {\n    win = commonjsGlobal;\n  } else if (typeof self !== \"undefined\") {\n    win = self;\n  } else {\n    win = {};\n  }\n  var window_1 = win;\n  /**\n   * mux.js\n   *\n   * Copyright (c) Brightcove\n   * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n   *\n   * Reads in-band CEA-708 captions out of FMP4 segments.\n   * @see https://en.wikipedia.org/wiki/CEA-708\n   */\n\n  var discardEmulationPreventionBytes = captionPacketParser.discardEmulationPreventionBytes;\n  var CaptionStream = captionStream.CaptionStream;\n  var findBox$1 = findBox_1;\n  var parseTfdt$1 = parseTfdt$2;\n  var parseTrun$1 = parseTrun$2;\n  var parseTfhd$1 = parseTfhd$2;\n  var window$2 = window_1;\n  /**\n   * Maps an offset in the mdat to a sample based on 
the size of the samples.\n * Assumes that `parseSamples` has been called first.\n *\n * @param {Number} offset - The offset into the mdat\n * @param {Object[]} samples - An array of samples, parsed using `parseSamples`\n * @return {?Object} The matching sample, or null if no match was found.\n *\n * @see ISO-BMFF-12/2015, Section 8.8.8\n **/\n\n var mapToSample = function (offset, samples) {\n var approximateOffset = offset;\n for (var i = 0; i < samples.length; i++) {\n var sample = samples[i];\n if (approximateOffset < sample.size) {\n return sample;\n }\n approximateOffset -= sample.size;\n }\n return null;\n };\n /**\n * Finds SEI nal units contained in a Media Data Box.\n * Assumes that `parseSamples` has been called first.\n *\n * @param {Uint8Array} avcStream - The bytes of the mdat\n * @param {Object[]} samples - The samples parsed out by `parseSamples`\n * @param {Number} trackId - The trackId of this video track\n * @return {Object[]} seiNals - the parsed SEI NALUs found.\n * The contents of the seiNal should match what is expected by\n * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)\n *\n * @see ISO-BMFF-12/2015, Section 8.1.1\n * @see Rec. ITU-T H.264, 7.3.2.3.1\n **/\n\n var findSeiNals = function (avcStream, samples, trackId) {\n var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),\n result = {\n logs: [],\n seiNals: []\n },\n seiNal,\n i,\n length,\n lastMatchedSample;\n for (i = 0; i + 4 < avcStream.length; i += length) {\n length = avcView.getUint32(i);\n i += 4; // Bail if this doesn't appear to be an H264 stream\n\n if (length <= 0) {\n continue;\n }\n switch (avcStream[i] & 0x1F) {\n case 0x06:\n var data = avcStream.subarray(i + 1, i + 1 + length);\n var matchingSample = mapToSample(i, samples);\n seiNal = {\n nalUnitType: 'sei_rbsp',\n size: length,\n data: data,\n escapedRBSP: discardEmulationPreventionBytes(data),\n trackId: trackId\n };\n if (matchingSample) {\n seiNal.pts = matchingSample.pts;\n seiNal.dts = matchingSample.dts;\n lastMatchedSample = matchingSample;\n } else if (lastMatchedSample) {\n // If a matching sample cannot be found, use the last\n // sample's values as they should be as close as possible\n seiNal.pts = lastMatchedSample.pts;\n seiNal.dts = lastMatchedSample.dts;\n } else {\n result.logs.push({\n level: 'warn',\n message: 'We\\'ve encountered a nal unit without data at ' + i + ' for trackId ' + trackId + '. See mux.js#223.'\n });\n break;\n }\n result.seiNals.push(seiNal);\n break;\n }\n }\n return result;\n };\n /**\n * Parses sample information out of Track Run Boxes and calculates\n * the absolute presentation and decode timestamps of each sample.\n *\n * @param {Array} truns - The Trun Run boxes to be parsed\n * @param {Number|BigInt} baseMediaDecodeTime - base media decode time from tfdt\n @see ISO-BMFF-12/2015, Section 8.8.12\n * @param {Object} tfhd - The parsed Track Fragment Header\n * @see inspect.parseTfhd\n * @return {Object[]} the parsed samples\n *\n * @see ISO-BMFF-12/2015, Section 8.8.8\n **/\n\n var parseSamples = function (truns, baseMediaDecodeTime, tfhd) {\n var currentDts = baseMediaDecodeTime;\n var defaultSampleDuration = tfhd.defaultSampleDuration || 0;\n var defaultSampleSize = tfhd.defaultSampleSize || 0;\n var trackId = tfhd.trackId;\n var allSamples = [];\n truns.forEach(function (trun) {\n // Note: We currently do not parse the sample table as well\n // as the trun. 
It's possible some sources will require this.\n    // moov > trak > mdia > minf > stbl\n    var trackRun = parseTrun$1(trun);\n    var samples = trackRun.samples;\n    samples.forEach(function (sample) {\n      if (sample.duration === undefined) {\n        sample.duration = defaultSampleDuration;\n      }\n      if (sample.size === undefined) {\n        sample.size = defaultSampleSize;\n      }\n      sample.trackId = trackId;\n      sample.dts = currentDts;\n      if (sample.compositionTimeOffset === undefined) {\n        sample.compositionTimeOffset = 0;\n      }\n      if (typeof currentDts === 'bigint') {\n        sample.pts = currentDts + window$2.BigInt(sample.compositionTimeOffset);\n        currentDts += window$2.BigInt(sample.duration);\n      } else {\n        sample.pts = currentDts + sample.compositionTimeOffset;\n        currentDts += sample.duration;\n      }\n    });\n    allSamples = allSamples.concat(samples);\n  });\n  return allSamples;\n  };\n  /**\n   * Parses out caption nals from an FMP4 segment's video tracks.\n   *\n   * @param {Uint8Array} segment - The bytes of a single segment\n   * @param {Number} videoTrackId - The trackId of a video track in the segment\n   * @return {Object.<Number, Object[]>} A mapping of video trackId to\n   *   a list of seiNals found in that track\n   **/\n\n  var parseCaptionNals = function (segment, videoTrackId) {\n    // To get the samples\n    var trafs = findBox$1(segment, ['moof', 'traf']); // To get SEI NAL units\n\n    var mdats = findBox$1(segment, ['mdat']);\n    var captionNals = {};\n    var mdatTrafPairs = []; // Pair up each traf with a mdat as moofs and mdats are in pairs\n\n    mdats.forEach(function (mdat, index) {\n      var matchingTraf = trafs[index];\n      mdatTrafPairs.push({\n        mdat: mdat,\n        traf: matchingTraf\n      });\n    });\n    mdatTrafPairs.forEach(function (pair) {\n      var mdat = pair.mdat;\n      var traf = pair.traf;\n      var tfhd = findBox$1(traf, ['tfhd']); // Exactly 1 tfhd per traf\n\n      var headerInfo = parseTfhd$1(tfhd[0]);\n      var trackId = headerInfo.trackId;\n      var tfdt = findBox$1(traf, ['tfdt']); // Either 0 or 1 tfdt per traf\n\n      var baseMediaDecodeTime = tfdt.length > 0 ? 
parseTfdt$1(tfdt[0]).baseMediaDecodeTime : 0;\n      var truns = findBox$1(traf, ['trun']);\n      var samples;\n      var result; // Only parse video data for the chosen video track\n\n      if (videoTrackId === trackId && truns.length > 0) {\n        samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);\n        result = findSeiNals(mdat, samples, trackId);\n        if (!captionNals[trackId]) {\n          captionNals[trackId] = {\n            seiNals: [],\n            logs: []\n          };\n        }\n        captionNals[trackId].seiNals = captionNals[trackId].seiNals.concat(result.seiNals);\n        captionNals[trackId].logs = captionNals[trackId].logs.concat(result.logs);\n      }\n    });\n    return captionNals;\n  };\n  /**\n   * Parses out inband captions from an MP4 container and returns\n   * caption objects that can be used by WebVTT and the TextTrack API.\n   * @see https://developer.mozilla.org/en-US/docs/Web/API/VTTCue\n   * @see https://developer.mozilla.org/en-US/docs/Web/API/TextTrack\n   * Assumes that `probe.getVideoTrackIds` and `probe.timescale` have been called first\n   *\n   * @param {Uint8Array} segment - The fmp4 segment containing embedded captions\n   * @param {Number} trackId - The id of the video track to parse\n   * @param {Number} timescale - The timescale for the video track from the init segment\n   *\n   * @return {?Object[]} parsedCaptions - A list of captions or null if no video tracks\n   * @return {Number} parsedCaptions[].startTime - The time to show the caption in seconds\n   * @return {Number} parsedCaptions[].endTime - The time to stop showing the caption in seconds\n   * @return {Object[]} parsedCaptions[].content - A list of individual caption segments\n   * @return {String} parsedCaptions[].content.text - The visible content of the caption segment\n   * @return {Number} parsedCaptions[].content.line - The line height from 1-15 for positioning of the caption segment\n   * @return {Number} parsedCaptions[].content.position - The column indent percentage for cue positioning from 10-80\n   **/\n\n  var parseEmbeddedCaptions = function (segment, trackId, timescale) {\n    var captionNals; // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there\n\n    if (trackId === null) {\n      return null;\n    }\n    captionNals = parseCaptionNals(segment, trackId);\n    var trackNals = captionNals[trackId] || {};\n    return {\n      seiNals: trackNals.seiNals,\n      logs: trackNals.logs,\n      timescale: timescale\n    };\n  };\n  /**\n   * Converts SEI NALUs into captions that can be used by video.js\n   **/\n\n  var CaptionParser = function () {\n    var isInitialized = false;\n    var captionStream;\n    // Stores segments seen before trackId and timescale are set\n    var segmentCache;\n    // Stores video track ID of the track being parsed\n    var trackId;\n    // Stores the timescale of the track being parsed\n    var timescale;\n    // Stores captions parsed so far\n    var parsedCaptions;\n    // Stores whether we are receiving partial data or not\n    var parsingPartial;\n    /**\n     * A method to indicate whether a CaptionParser has been initialized\n     * @returns {Boolean}\n     **/\n\n    this.isInitialized = function () {\n      return isInitialized;\n    };\n    /**\n     * Initializes the underlying CaptionStream, SEI NAL parsing\n     * and management, and caption collection\n     **/\n\n    this.init = function (options) {\n      captionStream = new CaptionStream();\n      isInitialized = true;\n      parsingPartial = options ? 
options.isPartial : false; // Collect dispatched captions\n\n captionStream.on('data', function (event) {\n // Convert to seconds in the source's timescale\n event.startTime = event.startPts / timescale;\n event.endTime = event.endPts / timescale;\n parsedCaptions.captions.push(event);\n parsedCaptions.captionStreams[event.stream] = true;\n });\n captionStream.on('log', function (log) {\n parsedCaptions.logs.push(log);\n });\n };\n /**\n * Determines if a new video track will be selected\n * or if the timescale changed\n * @return {Boolean}\n **/\n\n this.isNewInit = function (videoTrackIds, timescales) {\n if (videoTrackIds && videoTrackIds.length === 0 || timescales && typeof timescales === 'object' && Object.keys(timescales).length === 0) {\n return false;\n }\n return trackId !== videoTrackIds[0] || timescale !== timescales[trackId];\n };\n /**\n * Parses out SEI captions and interacts with underlying\n * CaptionStream to return dispatched captions\n *\n * @param {Uint8Array} segment - The fmp4 segment containing embedded captions\n * @param {Number[]} videoTrackIds - A list of video tracks found in the init segment\n * @param {Object.} timescales - The timescales found in the init segment\n * @see parseEmbeddedCaptions\n * @see m2ts/caption-stream.js\n **/\n\n this.parse = function (segment, videoTrackIds, timescales) {\n var parsedData;\n if (!this.isInitialized()) {\n return null; // This is not likely to be a video segment\n } else if (!videoTrackIds || !timescales) {\n return null;\n } else if (this.isNewInit(videoTrackIds, timescales)) {\n // Use the first video track only as there is no\n // mechanism to switch to other video tracks\n trackId = videoTrackIds[0];\n timescale = timescales[trackId]; // If an init segment has not been seen yet, hold onto segment\n // data until we have one.\n // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there\n } else if (trackId === null || !timescale) {\n segmentCache.push(segment);\n return null;\n } // Now that a timescale and trackId is set, parse cached segments\n\n while (segmentCache.length > 0) {\n var cachedSegment = segmentCache.shift();\n this.parse(cachedSegment, videoTrackIds, timescales);\n }\n parsedData = parseEmbeddedCaptions(segment, trackId, timescale);\n if (parsedData && parsedData.logs) {\n parsedCaptions.logs = parsedCaptions.logs.concat(parsedData.logs);\n }\n if (parsedData === null || !parsedData.seiNals) {\n if (parsedCaptions.logs.length) {\n return {\n logs: parsedCaptions.logs,\n captions: [],\n captionStreams: []\n };\n }\n return null;\n }\n this.pushNals(parsedData.seiNals); // Force the parsed captions to be dispatched\n\n this.flushStream();\n return parsedCaptions;\n };\n /**\n * Pushes SEI NALUs onto CaptionStream\n * @param {Object[]} nals - A list of SEI nals parsed using `parseCaptionNals`\n * Assumes that `parseCaptionNals` has been called first\n * @see m2ts/caption-stream.js\n **/\n\n this.pushNals = function (nals) {\n if (!this.isInitialized() || !nals || nals.length === 0) {\n return null;\n }\n nals.forEach(function (nal) {\n captionStream.push(nal);\n });\n };\n /**\n * Flushes underlying CaptionStream to dispatch processed, displayable captions\n * @see m2ts/caption-stream.js\n **/\n\n this.flushStream = function () {\n if (!this.isInitialized()) {\n return null;\n }\n if (!parsingPartial) {\n captionStream.flush();\n } else {\n captionStream.partialFlush();\n }\n };\n /**\n * Reset caption buckets for new data\n **/\n\n this.clearParsedCaptions = function 
() {\n parsedCaptions.captions = [];\n parsedCaptions.captionStreams = {};\n parsedCaptions.logs = [];\n };\n /**\n * Resets underlying CaptionStream\n * @see m2ts/caption-stream.js\n **/\n\n this.resetCaptionStream = function () {\n if (!this.isInitialized()) {\n return null;\n }\n captionStream.reset();\n };\n /**\n * Convenience method to clear all captions flushed from the\n * CaptionStream and still being parsed\n * @see m2ts/caption-stream.js\n **/\n\n this.clearAllCaptions = function () {\n this.clearParsedCaptions();\n this.resetCaptionStream();\n };\n /**\n * Reset caption parser\n **/\n\n this.reset = function () {\n segmentCache = [];\n trackId = null;\n timescale = null;\n if (!parsedCaptions) {\n parsedCaptions = {\n captions: [],\n // CC1, CC2, CC3, CC4\n captionStreams: {},\n logs: []\n };\n } else {\n this.clearParsedCaptions();\n }\n this.resetCaptionStream();\n };\n this.reset();\n };\n var captionParser = CaptionParser;\n /**\n * Returns the first string in the data array ending with a null char '\\0'\n * @param {UInt8} data \n * @returns the string with the null char\n */\n\n var uint8ToCString$1 = function (data) {\n var index = 0;\n var curChar = String.fromCharCode(data[index]);\n var retString = '';\n while (curChar !== '\\0') {\n retString += curChar;\n index++;\n curChar = String.fromCharCode(data[index]);\n } // Add nullChar\n\n retString += curChar;\n return retString;\n };\n var string = {\n uint8ToCString: uint8ToCString$1\n };\n var uint8ToCString = string.uint8ToCString;\n var getUint64$1 = numbers.getUint64;\n /**\n * Based on: ISO/IEC 23009 Section: 5.10.3.3\n * References:\n * https://dashif-documents.azurewebsites.net/Events/master/event.html#emsg-format\n * https://aomediacodec.github.io/id3-emsg/\n * \n * Takes emsg box data as a uint8 array and returns a emsg box object\n * @param {UInt8Array} boxData data from emsg box\n * @returns A parsed emsg box object\n */\n\n var parseEmsgBox = function (boxData) {\n // version + flags\n var offset = 4;\n var version = boxData[0];\n var scheme_id_uri, value, timescale, presentation_time, presentation_time_delta, event_duration, id, message_data;\n if (version === 0) {\n scheme_id_uri = uint8ToCString(boxData.subarray(offset));\n offset += scheme_id_uri.length;\n value = uint8ToCString(boxData.subarray(offset));\n offset += value.length;\n var dv = new DataView(boxData.buffer);\n timescale = dv.getUint32(offset);\n offset += 4;\n presentation_time_delta = dv.getUint32(offset);\n offset += 4;\n event_duration = dv.getUint32(offset);\n offset += 4;\n id = dv.getUint32(offset);\n offset += 4;\n } else if (version === 1) {\n var dv = new DataView(boxData.buffer);\n timescale = dv.getUint32(offset);\n offset += 4;\n presentation_time = getUint64$1(boxData.subarray(offset));\n offset += 8;\n event_duration = dv.getUint32(offset);\n offset += 4;\n id = dv.getUint32(offset);\n offset += 4;\n scheme_id_uri = uint8ToCString(boxData.subarray(offset));\n offset += scheme_id_uri.length;\n value = uint8ToCString(boxData.subarray(offset));\n offset += value.length;\n }\n message_data = new Uint8Array(boxData.subarray(offset, boxData.byteLength));\n var emsgBox = {\n scheme_id_uri,\n value,\n // if timescale is undefined or 0 set to 1 \n timescale: timescale ? timescale : 1,\n presentation_time,\n presentation_time_delta,\n event_duration,\n id,\n message_data\n };\n return isValidEmsgBox(version, emsgBox) ? 
emsgBox : undefined;\n };\n /**\n * Scales a presentation time or time delta with an offset with a provided timescale\n * @param {number} presentationTime \n * @param {number} timescale \n * @param {number} timeDelta \n * @param {number} offset \n * @returns the scaled time as a number\n */\n\n var scaleTime = function (presentationTime, timescale, timeDelta, offset) {\n return presentationTime || presentationTime === 0 ? presentationTime / timescale : offset + timeDelta / timescale;\n };\n /**\n * Checks the emsg box data for validity based on the version\n * @param {number} version of the emsg box to validate\n * @param {Object} emsg the emsg data to validate\n * @returns if the box is valid as a boolean\n */\n\n var isValidEmsgBox = function (version, emsg) {\n var hasScheme = emsg.scheme_id_uri !== '\\0';\n var isValidV0Box = version === 0 && isDefined(emsg.presentation_time_delta) && hasScheme;\n var isValidV1Box = version === 1 && isDefined(emsg.presentation_time) && hasScheme; // Only valid versions of emsg are 0 and 1\n\n return !(version > 1) && isValidV0Box || isValidV1Box;\n }; // Utility function to check if an object is defined\n\n var isDefined = function (data) {\n return data !== undefined || data !== null;\n };\n var emsg$1 = {\n parseEmsgBox: parseEmsgBox,\n scaleTime: scaleTime\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about MP4s.\n */\n\n var toUnsigned = bin.toUnsigned;\n var toHexString = bin.toHexString;\n var findBox = findBox_1;\n var parseType$1 = parseType_1;\n var emsg = emsg$1;\n var parseTfhd = parseTfhd$2;\n var parseTrun = parseTrun$2;\n var parseTfdt = parseTfdt$2;\n var getUint64 = numbers.getUint64;\n var timescale, startTime, compositionStartTime, getVideoTrackIds, getTracks, getTimescaleFromMediaHeader, getEmsgID3;\n var window$1 = window_1;\n var parseId3Frames = parseId3.parseId3Frames;\n /**\n * Parses an MP4 initialization segment and extracts the timescale\n * values for any declared tracks. Timescale values indicate the\n * number of clock ticks per second to assume for time-based values\n * elsewhere in the MP4.\n *\n * To determine the start time of an MP4, you need two pieces of\n * information: the timescale unit and the earliest base media decode\n * time. Multiple timescales can be specified within an MP4 but the\n * base media decode time is always expressed in the timescale from\n * the media header box for the track:\n * ```\n * moov > trak > mdia > mdhd.timescale\n * ```\n * @param init {Uint8Array} the bytes of the init segment\n * @return {object} a hash of track ids to timescale values or null if\n * the init segment is malformed.\n */\n\n timescale = function (init) {\n var result = {},\n traks = findBox(init, ['moov', 'trak']); // mdhd timescale\n\n return traks.reduce(function (result, trak) {\n var tkhd, version, index, id, mdhd;\n tkhd = findBox(trak, ['tkhd'])[0];\n if (!tkhd) {\n return null;\n }\n version = tkhd[0];\n index = version === 0 ? 12 : 20;\n id = toUnsigned(tkhd[index] << 24 | tkhd[index + 1] << 16 | tkhd[index + 2] << 8 | tkhd[index + 3]);\n mdhd = findBox(trak, ['mdia', 'mdhd'])[0];\n if (!mdhd) {\n return null;\n }\n version = mdhd[0];\n index = version === 0 ? 
12 : 20;\n result[id] = toUnsigned(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);\n return result;\n }, result);\n };\n /**\n * Determine the base media decode start time, in seconds, for an MP4\n * fragment. If multiple fragments are specified, the earliest time is\n * returned.\n *\n * The base media decode time can be parsed from track fragment\n * metadata:\n * ```\n * moof > traf > tfdt.baseMediaDecodeTime\n * ```\n * It requires the timescale value from the mdhd to interpret.\n *\n * @param timescale {object} a hash of track ids to timescale values.\n * @return {number} the earliest base media decode start time for the\n * fragment, in seconds\n */\n\n startTime = function (timescale, fragment) {\n var trafs; // we need info from two childrend of each track fragment box\n\n trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track\n\n var lowestTime = trafs.reduce(function (acc, traf) {\n var tfhd = findBox(traf, ['tfhd'])[0]; // get the track id from the tfhd\n\n var id = toUnsigned(tfhd[4] << 24 | tfhd[5] << 16 | tfhd[6] << 8 | tfhd[7]); // assume a 90kHz clock if no timescale was specified\n\n var scale = timescale[id] || 90e3; // get the base media decode time from the tfdt\n\n var tfdt = findBox(traf, ['tfdt'])[0];\n var dv = new DataView(tfdt.buffer, tfdt.byteOffset, tfdt.byteLength);\n var baseTime; // version 1 is 64 bit\n\n if (tfdt[0] === 1) {\n baseTime = getUint64(tfdt.subarray(4, 12));\n } else {\n baseTime = dv.getUint32(4);\n } // convert base time to seconds if it is a valid number.\n\n let seconds;\n if (typeof baseTime === 'bigint') {\n seconds = baseTime / window$1.BigInt(scale);\n } else if (typeof baseTime === 'number' && !isNaN(baseTime)) {\n seconds = baseTime / scale;\n }\n if (seconds < Number.MAX_SAFE_INTEGER) {\n seconds = Number(seconds);\n }\n if (seconds < acc) {\n acc = seconds;\n }\n return acc;\n }, Infinity);\n return typeof lowestTime === 'bigint' || isFinite(lowestTime) ? 
lowestTime : 0;\n };\n /**\n * Determine the composition start, in seconds, for an MP4\n * fragment.\n *\n * The composition start time of a fragment can be calculated using the base\n * media decode time, composition time offset, and timescale, as follows:\n *\n * compositionStartTime = (baseMediaDecodeTime + compositionTimeOffset) / timescale\n *\n * All of the aforementioned information is contained within a media fragment's\n * `traf` box, except for timescale info, which comes from the initialization\n * segment, so a track id (also contained within a `traf`) is also necessary to\n * associate it with a timescale\n *\n *\n * @param timescales {object} - a hash of track ids to timescale values.\n * @param fragment {Unit8Array} - the bytes of a media segment\n * @return {number} the composition start time for the fragment, in seconds\n **/\n\n compositionStartTime = function (timescales, fragment) {\n var trafBoxes = findBox(fragment, ['moof', 'traf']);\n var baseMediaDecodeTime = 0;\n var compositionTimeOffset = 0;\n var trackId;\n if (trafBoxes && trafBoxes.length) {\n // The spec states that track run samples contained within a `traf` box are contiguous, but\n // it does not explicitly state whether the `traf` boxes themselves are contiguous.\n // We will assume that they are, so we only need the first to calculate start time.\n var tfhd = findBox(trafBoxes[0], ['tfhd'])[0];\n var trun = findBox(trafBoxes[0], ['trun'])[0];\n var tfdt = findBox(trafBoxes[0], ['tfdt'])[0];\n if (tfhd) {\n var parsedTfhd = parseTfhd(tfhd);\n trackId = parsedTfhd.trackId;\n }\n if (tfdt) {\n var parsedTfdt = parseTfdt(tfdt);\n baseMediaDecodeTime = parsedTfdt.baseMediaDecodeTime;\n }\n if (trun) {\n var parsedTrun = parseTrun(trun);\n if (parsedTrun.samples && parsedTrun.samples.length) {\n compositionTimeOffset = parsedTrun.samples[0].compositionTimeOffset || 0;\n }\n }\n } // Get timescale for this specific track. Assume a 90kHz clock if no timescale was\n // specified.\n\n var timescale = timescales[trackId] || 90e3; // return the composition start time, in seconds\n\n if (typeof baseMediaDecodeTime === 'bigint') {\n compositionTimeOffset = window$1.BigInt(compositionTimeOffset);\n timescale = window$1.BigInt(timescale);\n }\n var result = (baseMediaDecodeTime + compositionTimeOffset) / timescale;\n if (typeof result === 'bigint' && result < Number.MAX_SAFE_INTEGER) {\n result = Number(result);\n }\n return result;\n };\n /**\n * Find the trackIds of the video tracks in this source.\n * Found by parsing the Handler Reference and Track Header Boxes:\n * moov > trak > mdia > hdlr\n * moov > trak > tkhd\n *\n * @param {Uint8Array} init - The bytes of the init segment for this source\n * @return {Number[]} A list of trackIds\n *\n * @see ISO-BMFF-12/2015, Section 8.4.3\n **/\n\n getVideoTrackIds = function (init) {\n var traks = findBox(init, ['moov', 'trak']);\n var videoTrackIds = [];\n traks.forEach(function (trak) {\n var hdlrs = findBox(trak, ['mdia', 'hdlr']);\n var tkhds = findBox(trak, ['tkhd']);\n hdlrs.forEach(function (hdlr, index) {\n var handlerType = parseType$1(hdlr.subarray(8, 12));\n var tkhd = tkhds[index];\n var view;\n var version;\n var trackId;\n if (handlerType === 'vide') {\n view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n version = view.getUint8(0);\n trackId = version === 0 ? 
view.getUint32(12) : view.getUint32(20);\n videoTrackIds.push(trackId);\n }\n });\n });\n return videoTrackIds;\n };\n getTimescaleFromMediaHeader = function (mdhd) {\n // mdhd is a FullBox, meaning it will have its own version as the first byte\n var version = mdhd[0];\n var index = version === 0 ? 12 : 20;\n return toUnsigned(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);\n };\n /**\n * Get all the video, audio, and hint tracks from a non fragmented\n * mp4 segment\n */\n\n getTracks = function (init) {\n var traks = findBox(init, ['moov', 'trak']);\n var tracks = [];\n traks.forEach(function (trak) {\n var track = {};\n var tkhd = findBox(trak, ['tkhd'])[0];\n var view, tkhdVersion; // id\n\n if (tkhd) {\n view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n tkhdVersion = view.getUint8(0);\n track.id = tkhdVersion === 0 ? view.getUint32(12) : view.getUint32(20);\n }\n var hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; // type\n\n if (hdlr) {\n var type = parseType$1(hdlr.subarray(8, 12));\n if (type === 'vide') {\n track.type = 'video';\n } else if (type === 'soun') {\n track.type = 'audio';\n } else {\n track.type = type;\n }\n } // codec\n\n var stsd = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd'])[0];\n if (stsd) {\n var sampleDescriptions = stsd.subarray(8); // gives the codec type string\n\n track.codec = parseType$1(sampleDescriptions.subarray(4, 8));\n var codecBox = findBox(sampleDescriptions, [track.codec])[0];\n var codecConfig, codecConfigType;\n if (codecBox) {\n // https://tools.ietf.org/html/rfc6381#section-3.3\n if (/^[asm]vc[1-9]$/i.test(track.codec)) {\n // we don't need anything but the \"config\" parameter of the\n // avc1 codecBox\n codecConfig = codecBox.subarray(78);\n codecConfigType = parseType$1(codecConfig.subarray(4, 8));\n if (codecConfigType === 'avcC' && codecConfig.length > 11) {\n track.codec += '.'; // left padded with zeroes for single digit hex\n // profile idc\n\n track.codec += toHexString(codecConfig[9]); // the byte containing the constraint_set flags\n\n track.codec += toHexString(codecConfig[10]); // level idc\n\n track.codec += toHexString(codecConfig[11]);\n } else {\n // TODO: show a warning that we couldn't parse the codec\n // and are using the default\n track.codec = 'avc1.4d400d';\n }\n } else if (/^mp4[a,v]$/i.test(track.codec)) {\n // we do not need anything but the streamDescriptor of the mp4a codecBox\n codecConfig = codecBox.subarray(28);\n codecConfigType = parseType$1(codecConfig.subarray(4, 8));\n if (codecConfigType === 'esds' && codecConfig.length > 20 && codecConfig[19] !== 0) {\n track.codec += '.' + toHexString(codecConfig[19]); // this value is only a single digit\n\n track.codec += '.' + toHexString(codecConfig[20] >>> 2 & 0x3f).replace(/^0/, '');\n } else {\n // TODO: show a warning that we couldn't parse the codec\n // and are using the default\n track.codec = 'mp4a.40.2';\n }\n } else {\n // flac, opus, etc\n track.codec = track.codec.toLowerCase();\n }\n }\n }\n var mdhd = findBox(trak, ['mdia', 'mdhd'])[0];\n if (mdhd) {\n track.timescale = getTimescaleFromMediaHeader(mdhd);\n }\n tracks.push(track);\n });\n return tracks;\n };\n /**\n * Returns an array of emsg ID3 data from the provided segmentData.\n * An offset can also be provided as the Latest Arrival Time to calculate \n * the Event Start Time of v0 EMSG boxes. 
\n * See: https://dashif-documents.azurewebsites.net/Events/master/event.html#Inband-event-timing\n * \n * @param {Uint8Array} segmentData the segment byte array.\n * @param {number} offset the segment start time or Latest Arrival Time, \n * @return {Object[]} an array of ID3 parsed from EMSG boxes\n */\n\n getEmsgID3 = function (segmentData, offset = 0) {\n var emsgBoxes = findBox(segmentData, ['emsg']);\n return emsgBoxes.map(data => {\n var parsedBox = emsg.parseEmsgBox(new Uint8Array(data));\n var parsedId3Frames = parseId3Frames(parsedBox.message_data);\n return {\n cueTime: emsg.scaleTime(parsedBox.presentation_time, parsedBox.timescale, parsedBox.presentation_time_delta, offset),\n duration: emsg.scaleTime(parsedBox.event_duration, parsedBox.timescale),\n frames: parsedId3Frames\n };\n });\n };\n var probe$2 = {\n // export mp4 inspector's findBox and parseType for backwards compatibility\n findBox: findBox,\n parseType: parseType$1,\n timescale: timescale,\n startTime: startTime,\n compositionStartTime: compositionStartTime,\n videoTrackIds: getVideoTrackIds,\n tracks: getTracks,\n getTimescaleFromMediaHeader: getTimescaleFromMediaHeader,\n getEmsgID3: getEmsgID3\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about TS Segments.\n */\n\n var StreamTypes$1 = streamTypes;\n var parsePid = function (packet) {\n var pid = packet[1] & 0x1f;\n pid <<= 8;\n pid |= packet[2];\n return pid;\n };\n var parsePayloadUnitStartIndicator = function (packet) {\n return !!(packet[1] & 0x40);\n };\n var parseAdaptionField = function (packet) {\n var offset = 0; // if an adaption field is present, its length is specified by the\n // fifth byte of the TS packet header. The adaptation field is\n // used to add stuffing to PES packets that don't fill a complete\n // TS packet, and to specify some forms of timing and control data\n // that we do not currently use.\n\n if ((packet[3] & 0x30) >>> 4 > 0x01) {\n offset += packet[4] + 1;\n }\n return offset;\n };\n var parseType = function (packet, pmtPid) {\n var pid = parsePid(packet);\n if (pid === 0) {\n return 'pat';\n } else if (pid === pmtPid) {\n return 'pmt';\n } else if (pmtPid) {\n return 'pes';\n }\n return null;\n };\n var parsePat = function (packet) {\n var pusi = parsePayloadUnitStartIndicator(packet);\n var offset = 4 + parseAdaptionField(packet);\n if (pusi) {\n offset += packet[offset] + 1;\n }\n return (packet[offset + 10] & 0x1f) << 8 | packet[offset + 11];\n };\n var parsePmt = function (packet) {\n var programMapTable = {};\n var pusi = parsePayloadUnitStartIndicator(packet);\n var payloadOffset = 4 + parseAdaptionField(packet);\n if (pusi) {\n payloadOffset += packet[payloadOffset] + 1;\n } // PMTs can be sent ahead of the time when they should actually\n // take effect. We don't believe this should ever be the case\n // for HLS but we'll ignore \"forward\" PMT declarations if we see\n // them. 
Future PMT declarations have the current_next_indicator\n // set to zero.\n\n if (!(packet[payloadOffset + 5] & 0x01)) {\n return;\n }\n var sectionLength, tableEnd, programInfoLength; // the mapping table ends at the end of the current section\n\n sectionLength = (packet[payloadOffset + 1] & 0x0f) << 8 | packet[payloadOffset + 2];\n tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n\n programInfoLength = (packet[payloadOffset + 10] & 0x0f) << 8 | packet[payloadOffset + 11]; // advance the offset to the first entry in the mapping table\n\n var offset = 12 + programInfoLength;\n while (offset < tableEnd) {\n var i = payloadOffset + offset; // add an entry that maps the elementary_pid to the stream_type\n\n programMapTable[(packet[i + 1] & 0x1F) << 8 | packet[i + 2]] = packet[i]; // move to the next table entry\n // skip past the elementary stream descriptors, if present\n\n offset += ((packet[i + 3] & 0x0F) << 8 | packet[i + 4]) + 5;\n }\n return programMapTable;\n };\n var parsePesType = function (packet, programMapTable) {\n var pid = parsePid(packet);\n var type = programMapTable[pid];\n switch (type) {\n case StreamTypes$1.H264_STREAM_TYPE:\n return 'video';\n case StreamTypes$1.ADTS_STREAM_TYPE:\n return 'audio';\n case StreamTypes$1.METADATA_STREAM_TYPE:\n return 'timed-metadata';\n default:\n return null;\n }\n };\n var parsePesTime = function (packet) {\n var pusi = parsePayloadUnitStartIndicator(packet);\n if (!pusi) {\n return null;\n }\n var offset = 4 + parseAdaptionField(packet);\n if (offset >= packet.byteLength) {\n // From the H 222.0 MPEG-TS spec\n // \"For transport stream packets carrying PES packets, stuffing is needed when there\n // is insufficient PES packet data to completely fill the transport stream packet\n // payload bytes. Stuffing is accomplished by defining an adaptation field longer than\n // the sum of the lengths of the data elements in it, so that the payload bytes\n // remaining after the adaptation field exactly accommodates the available PES packet\n // data.\"\n //\n // If the offset is >= the length of the packet, then the packet contains no data\n // and instead is just adaption field stuffing bytes\n return null;\n }\n var pes = null;\n var ptsDtsFlags; // PES packets may be annotated with a PTS value, or a PTS value\n // and a DTS value. Determine what combination of values is\n // available to work with.\n\n ptsDtsFlags = packet[offset + 7]; // PTS and DTS are normally stored as a 33-bit number. Javascript\n // performs all bitwise operations on 32-bit integers but javascript\n // supports a much greater range (52-bits) of integer using standard\n // mathematical operations.\n // We construct a 31-bit value using bitwise operators over the 31\n // most significant bits and then multiply by 4 (equal to a left-shift\n // of 2) before we add the final 2 least significant bits of the\n // timestamp (equal to an OR.)\n\n if (ptsDtsFlags & 0xC0) {\n pes = {}; // the PTS and DTS are not written out directly. 
For information\n // on how they are encoded, see\n // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n\n pes.pts = (packet[offset + 9] & 0x0E) << 27 | (packet[offset + 10] & 0xFF) << 20 | (packet[offset + 11] & 0xFE) << 12 | (packet[offset + 12] & 0xFF) << 5 | (packet[offset + 13] & 0xFE) >>> 3;\n pes.pts *= 4; // Left shift by 2\n\n pes.pts += (packet[offset + 13] & 0x06) >>> 1; // OR by the two LSBs\n\n pes.dts = pes.pts;\n if (ptsDtsFlags & 0x40) {\n pes.dts = (packet[offset + 14] & 0x0E) << 27 | (packet[offset + 15] & 0xFF) << 20 | (packet[offset + 16] & 0xFE) << 12 | (packet[offset + 17] & 0xFF) << 5 | (packet[offset + 18] & 0xFE) >>> 3;\n pes.dts *= 4; // Left shift by 2\n\n pes.dts += (packet[offset + 18] & 0x06) >>> 1; // OR by the two LSBs\n }\n }\n return pes;\n };\n var parseNalUnitType = function (type) {\n switch (type) {\n case 0x05:\n return 'slice_layer_without_partitioning_rbsp_idr';\n case 0x06:\n return 'sei_rbsp';\n case 0x07:\n return 'seq_parameter_set_rbsp';\n case 0x08:\n return 'pic_parameter_set_rbsp';\n case 0x09:\n return 'access_unit_delimiter_rbsp';\n default:\n return null;\n }\n };\n var videoPacketContainsKeyFrame = function (packet) {\n var offset = 4 + parseAdaptionField(packet);\n var frameBuffer = packet.subarray(offset);\n var frameI = 0;\n var frameSyncPoint = 0;\n var foundKeyFrame = false;\n var nalType; // advance the sync point to a NAL start, if necessary\n\n for (; frameSyncPoint < frameBuffer.byteLength - 3; frameSyncPoint++) {\n if (frameBuffer[frameSyncPoint + 2] === 1) {\n // the sync point is properly aligned\n frameI = frameSyncPoint + 5;\n break;\n }\n }\n while (frameI < frameBuffer.byteLength) {\n // look at the current byte to determine if we've hit the end of\n // a NAL unit boundary\n switch (frameBuffer[frameI]) {\n case 0:\n // skip past non-sync sequences\n if (frameBuffer[frameI - 1] !== 0) {\n frameI += 2;\n break;\n } else if (frameBuffer[frameI - 2] !== 0) {\n frameI++;\n break;\n }\n if (frameSyncPoint + 3 !== frameI - 2) {\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n } // drop trailing zeroes\n\n do {\n frameI++;\n } while (frameBuffer[frameI] !== 1 && frameI < frameBuffer.length);\n frameSyncPoint = frameI - 2;\n frameI += 3;\n break;\n case 1:\n // skip past non-sync sequences\n if (frameBuffer[frameI - 1] !== 0 || frameBuffer[frameI - 2] !== 0) {\n frameI += 3;\n break;\n }\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n frameSyncPoint = frameI - 2;\n frameI += 3;\n break;\n default:\n // the current byte isn't a one or zero, so it cannot be part\n // of a sync sequence\n frameI += 3;\n break;\n }\n }\n frameBuffer = frameBuffer.subarray(frameSyncPoint);\n frameI -= frameSyncPoint;\n frameSyncPoint = 0; // parse the final nal\n\n if (frameBuffer && frameBuffer.byteLength > 3) {\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n }\n return foundKeyFrame;\n };\n var probe$1 = {\n parseType: parseType,\n parsePat: parsePat,\n parsePmt: parsePmt,\n parsePayloadUnitStartIndicator: parsePayloadUnitStartIndicator,\n parsePesType: parsePesType,\n parsePesTime: parsePesTime,\n videoPacketContainsKeyFrame: videoPacketContainsKeyFrame\n };\n /**\n * mux.js\n *\n * Copyright (c) 
Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Parse mpeg2 transport stream packets to extract basic timing information\n */\n\n var StreamTypes = streamTypes;\n var handleRollover = timestampRolloverStream.handleRollover;\n var probe = {};\n probe.ts = probe$1;\n probe.aac = utils;\n var ONE_SECOND_IN_TS = clock$2.ONE_SECOND_IN_TS;\n var MP2T_PACKET_LENGTH = 188,\n // bytes\n SYNC_BYTE = 0x47;\n /**\n * walks through segment data looking for pat and pmt packets to parse out\n * program map table information\n */\n\n var parsePsi_ = function (bytes, pmt) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n packet,\n type;\n while (endIndex < bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pat':\n pmt.pid = probe.ts.parsePat(packet);\n break;\n case 'pmt':\n var table = probe.ts.parsePmt(packet);\n pmt.table = pmt.table || {};\n Object.keys(table).forEach(function (key) {\n pmt.table[key] = table[key];\n });\n break;\n }\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n }\n };\n /**\n * walks through the segment data from the start and end to get timing information\n * for the first and last audio pes packets\n */\n\n var parseAudioPes_ = function (bytes, pmt, result) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n packet,\n type,\n pesType,\n pusi,\n parsed;\n var endLoop = false; // Start walking from start of segment to get first audio packet\n\n while (endIndex <= bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && (bytes[endIndex] === SYNC_BYTE || endIndex === bytes.byteLength)) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType === 'audio' && pusi) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'audio';\n result.audio.push(parsed);\n endLoop = true;\n }\n }\n break;\n }\n if (endLoop) {\n break;\n }\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n } // Start walking from end of segment to get last audio packet\n\n endIndex = bytes.byteLength;\n startIndex = endIndex - MP2T_PACKET_LENGTH;\n endLoop = false;\n while (startIndex >= 0) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && (bytes[endIndex] === SYNC_BYTE || endIndex === bytes.byteLength)) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType 
=== 'audio' && pusi) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'audio';\n result.audio.push(parsed);\n endLoop = true;\n }\n }\n break;\n }\n if (endLoop) {\n break;\n }\n startIndex -= MP2T_PACKET_LENGTH;\n endIndex -= MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex--;\n endIndex--;\n }\n };\n /**\n * walks through the segment data from the start and end to get timing information\n * for the first and last video pes packets as well as timing information for the first\n * key frame.\n */\n\n var parseVideoPes_ = function (bytes, pmt, result) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n packet,\n type,\n pesType,\n pusi,\n parsed,\n frame,\n i,\n pes;\n var endLoop = false;\n var currentFrame = {\n data: [],\n size: 0\n }; // Start walking from start of segment to get first video packet\n\n while (endIndex < bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType === 'video') {\n if (pusi && !endLoop) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'video';\n result.video.push(parsed);\n endLoop = true;\n }\n }\n if (!result.firstKeyFrame) {\n if (pusi) {\n if (currentFrame.size !== 0) {\n frame = new Uint8Array(currentFrame.size);\n i = 0;\n while (currentFrame.data.length) {\n pes = currentFrame.data.shift();\n frame.set(pes, i);\n i += pes.byteLength;\n }\n if (probe.ts.videoPacketContainsKeyFrame(frame)) {\n var firstKeyFrame = probe.ts.parsePesTime(frame); // PTS/DTS may not be available. Simply *not* setting\n // the keyframe seems to work fine with HLS playback\n // and definitely preferable to a crash with TypeError...\n\n if (firstKeyFrame) {\n result.firstKeyFrame = firstKeyFrame;\n result.firstKeyFrame.type = 'video';\n } else {\n // eslint-disable-next-line\n console.warn('Failed to extract PTS/DTS from PES at first keyframe. ' + 'This could be an unusual TS segment, or else mux.js did not ' + 'parse your TS segment correctly. If you know your TS ' + 'segments do contain PTS/DTS on keyframes please file a bug ' + 'report! 
You can try ffprobe to double check for yourself.');\n }\n }\n currentFrame.size = 0;\n }\n }\n currentFrame.data.push(packet);\n currentFrame.size += packet.byteLength;\n }\n }\n break;\n }\n if (endLoop && result.firstKeyFrame) {\n break;\n }\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n } // Start walking from end of segment to get last video packet\n\n endIndex = bytes.byteLength;\n startIndex = endIndex - MP2T_PACKET_LENGTH;\n endLoop = false;\n while (startIndex >= 0) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType === 'video' && pusi) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'video';\n result.video.push(parsed);\n endLoop = true;\n }\n }\n break;\n }\n if (endLoop) {\n break;\n }\n startIndex -= MP2T_PACKET_LENGTH;\n endIndex -= MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex--;\n endIndex--;\n }\n };\n /**\n * Adjusts the timestamp information for the segment to account for\n * rollover and convert to seconds based on pes packet timescale (90khz clock)\n */\n\n var adjustTimestamp_ = function (segmentInfo, baseTimestamp) {\n if (segmentInfo.audio && segmentInfo.audio.length) {\n var audioBaseTimestamp = baseTimestamp;\n if (typeof audioBaseTimestamp === 'undefined' || isNaN(audioBaseTimestamp)) {\n audioBaseTimestamp = segmentInfo.audio[0].dts;\n }\n segmentInfo.audio.forEach(function (info) {\n info.dts = handleRollover(info.dts, audioBaseTimestamp);\n info.pts = handleRollover(info.pts, audioBaseTimestamp); // time in seconds\n\n info.dtsTime = info.dts / ONE_SECOND_IN_TS;\n info.ptsTime = info.pts / ONE_SECOND_IN_TS;\n });\n }\n if (segmentInfo.video && segmentInfo.video.length) {\n var videoBaseTimestamp = baseTimestamp;\n if (typeof videoBaseTimestamp === 'undefined' || isNaN(videoBaseTimestamp)) {\n videoBaseTimestamp = segmentInfo.video[0].dts;\n }\n segmentInfo.video.forEach(function (info) {\n info.dts = handleRollover(info.dts, videoBaseTimestamp);\n info.pts = handleRollover(info.pts, videoBaseTimestamp); // time in seconds\n\n info.dtsTime = info.dts / ONE_SECOND_IN_TS;\n info.ptsTime = info.pts / ONE_SECOND_IN_TS;\n });\n if (segmentInfo.firstKeyFrame) {\n var frame = segmentInfo.firstKeyFrame;\n frame.dts = handleRollover(frame.dts, videoBaseTimestamp);\n frame.pts = handleRollover(frame.pts, videoBaseTimestamp); // time in seconds\n\n frame.dtsTime = frame.dts / ONE_SECOND_IN_TS;\n frame.ptsTime = frame.pts / ONE_SECOND_IN_TS;\n }\n }\n };\n /**\n * inspects the aac data stream for start and end time information\n */\n\n var inspectAac_ = function (bytes) {\n var endLoop = false,\n audioCount = 0,\n sampleRate = null,\n timestamp = null,\n frameSize = 0,\n byteIndex = 0,\n packet;\n while (bytes.length - byteIndex >= 3) {\n var type = probe.aac.parseType(bytes, 
byteIndex);\n switch (type) {\n case 'timed-metadata':\n // Exit early because we don't have enough to parse\n // the ID3 tag header\n if (bytes.length - byteIndex < 10) {\n endLoop = true;\n break;\n }\n frameSize = probe.aac.parseId3TagSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (frameSize > bytes.length) {\n endLoop = true;\n break;\n }\n if (timestamp === null) {\n packet = bytes.subarray(byteIndex, byteIndex + frameSize);\n timestamp = probe.aac.parseAacTimestamp(packet);\n }\n byteIndex += frameSize;\n break;\n case 'audio':\n // Exit early because we don't have enough to parse\n // the ADTS frame header\n if (bytes.length - byteIndex < 7) {\n endLoop = true;\n break;\n }\n frameSize = probe.aac.parseAdtsSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (frameSize > bytes.length) {\n endLoop = true;\n break;\n }\n if (sampleRate === null) {\n packet = bytes.subarray(byteIndex, byteIndex + frameSize);\n sampleRate = probe.aac.parseSampleRate(packet);\n }\n audioCount++;\n byteIndex += frameSize;\n break;\n default:\n byteIndex++;\n break;\n }\n if (endLoop) {\n return null;\n }\n }\n if (sampleRate === null || timestamp === null) {\n return null;\n }\n var audioTimescale = ONE_SECOND_IN_TS / sampleRate;\n var result = {\n audio: [{\n type: 'audio',\n dts: timestamp,\n pts: timestamp\n }, {\n type: 'audio',\n dts: timestamp + audioCount * 1024 * audioTimescale,\n pts: timestamp + audioCount * 1024 * audioTimescale\n }]\n };\n return result;\n };\n /**\n * inspects the transport stream segment data for start and end time information\n * of the audio and video tracks (when present) as well as the first key frame's\n * start time.\n */\n\n var inspectTs_ = function (bytes) {\n var pmt = {\n pid: null,\n table: null\n };\n var result = {};\n parsePsi_(bytes, pmt);\n for (var pid in pmt.table) {\n if (pmt.table.hasOwnProperty(pid)) {\n var type = pmt.table[pid];\n switch (type) {\n case StreamTypes.H264_STREAM_TYPE:\n result.video = [];\n parseVideoPes_(bytes, pmt, result);\n if (result.video.length === 0) {\n delete result.video;\n }\n break;\n case StreamTypes.ADTS_STREAM_TYPE:\n result.audio = [];\n parseAudioPes_(bytes, pmt, result);\n if (result.audio.length === 0) {\n delete result.audio;\n }\n break;\n }\n }\n }\n return result;\n };\n /**\n * Inspects segment byte data and returns an object with start and end timing information\n *\n * @param {Uint8Array} bytes The segment byte data\n * @param {Number} baseTimestamp Relative reference timestamp used when adjusting frame\n * timestamps for rollover. 
This value must be in 90khz clock.\n * @return {Object} Object containing start and end frame timing info of segment.\n */\n\n var inspect = function (bytes, baseTimestamp) {\n var isAacData = probe.aac.isLikelyAacData(bytes);\n var result;\n if (isAacData) {\n result = inspectAac_(bytes);\n } else {\n result = inspectTs_(bytes);\n }\n if (!result || !result.audio && !result.video) {\n return null;\n }\n adjustTimestamp_(result, baseTimestamp);\n return result;\n };\n var tsInspector = {\n inspect: inspect,\n parseAudioPes_: parseAudioPes_\n };\n /* global self */\n\n /**\n * Re-emits transmuxer events by converting them into messages to the\n * world outside the worker.\n *\n * @param {Object} transmuxer the transmuxer to wire events on\n * @private\n */\n\n const wireTransmuxerEvents = function (self, transmuxer) {\n transmuxer.on('data', function (segment) {\n // transfer ownership of the underlying ArrayBuffer\n // instead of doing a copy to save memory\n // ArrayBuffers are transferable but generic TypedArrays are not\n // @link https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers#Passing_data_by_transferring_ownership_(transferable_objects)\n const initArray = segment.initSegment;\n segment.initSegment = {\n data: initArray.buffer,\n byteOffset: initArray.byteOffset,\n byteLength: initArray.byteLength\n };\n const typedArray = segment.data;\n segment.data = typedArray.buffer;\n self.postMessage({\n action: 'data',\n segment,\n byteOffset: typedArray.byteOffset,\n byteLength: typedArray.byteLength\n }, [segment.data]);\n });\n transmuxer.on('done', function (data) {\n self.postMessage({\n action: 'done'\n });\n });\n transmuxer.on('gopInfo', function (gopInfo) {\n self.postMessage({\n action: 'gopInfo',\n gopInfo\n });\n });\n transmuxer.on('videoSegmentTimingInfo', function (timingInfo) {\n const videoSegmentTimingInfo = {\n start: {\n decode: clock$2.videoTsToSeconds(timingInfo.start.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.start.pts)\n },\n end: {\n decode: clock$2.videoTsToSeconds(timingInfo.end.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.end.pts)\n },\n baseMediaDecodeTime: clock$2.videoTsToSeconds(timingInfo.baseMediaDecodeTime)\n };\n if (timingInfo.prependedContentDuration) {\n videoSegmentTimingInfo.prependedContentDuration = clock$2.videoTsToSeconds(timingInfo.prependedContentDuration);\n }\n self.postMessage({\n action: 'videoSegmentTimingInfo',\n videoSegmentTimingInfo\n });\n });\n transmuxer.on('audioSegmentTimingInfo', function (timingInfo) {\n // Note that all times for [audio/video]SegmentTimingInfo events are in video clock\n const audioSegmentTimingInfo = {\n start: {\n decode: clock$2.videoTsToSeconds(timingInfo.start.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.start.pts)\n },\n end: {\n decode: clock$2.videoTsToSeconds(timingInfo.end.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.end.pts)\n },\n baseMediaDecodeTime: clock$2.videoTsToSeconds(timingInfo.baseMediaDecodeTime)\n };\n if (timingInfo.prependedContentDuration) {\n audioSegmentTimingInfo.prependedContentDuration = clock$2.videoTsToSeconds(timingInfo.prependedContentDuration);\n }\n self.postMessage({\n action: 'audioSegmentTimingInfo',\n audioSegmentTimingInfo\n });\n });\n transmuxer.on('id3Frame', function (id3Frame) {\n self.postMessage({\n action: 'id3Frame',\n id3Frame\n });\n });\n transmuxer.on('caption', function (caption) {\n self.postMessage({\n action: 'caption',\n caption\n });\n });\n 
transmuxer.on('trackinfo', function (trackInfo) {\n self.postMessage({\n action: 'trackinfo',\n trackInfo\n });\n });\n transmuxer.on('audioTimingInfo', function (audioTimingInfo) {\n // convert to video TS since we prioritize video time over audio\n self.postMessage({\n action: 'audioTimingInfo',\n audioTimingInfo: {\n start: clock$2.videoTsToSeconds(audioTimingInfo.start),\n end: clock$2.videoTsToSeconds(audioTimingInfo.end)\n }\n });\n });\n transmuxer.on('videoTimingInfo', function (videoTimingInfo) {\n self.postMessage({\n action: 'videoTimingInfo',\n videoTimingInfo: {\n start: clock$2.videoTsToSeconds(videoTimingInfo.start),\n end: clock$2.videoTsToSeconds(videoTimingInfo.end)\n }\n });\n });\n transmuxer.on('log', function (log) {\n self.postMessage({\n action: 'log',\n log\n });\n });\n };\n /**\n * All incoming messages route through this hash. If no function exists\n * to handle an incoming message, then we ignore the message.\n *\n * @class MessageHandlers\n * @param {Object} options the options to initialize with\n */\n\n class MessageHandlers {\n constructor(self, options) {\n this.options = options || {};\n this.self = self;\n this.init();\n }\n /**\n * initialize our web worker and wire all the events.\n */\n\n init() {\n if (this.transmuxer) {\n this.transmuxer.dispose();\n }\n this.transmuxer = new transmuxer.Transmuxer(this.options);\n wireTransmuxerEvents(this.self, this.transmuxer);\n }\n pushMp4Captions(data) {\n if (!this.captionParser) {\n this.captionParser = new captionParser();\n this.captionParser.init();\n }\n const segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);\n const parsed = this.captionParser.parse(segment, data.trackIds, data.timescales);\n this.self.postMessage({\n action: 'mp4Captions',\n captions: parsed && parsed.captions || [],\n logs: parsed && parsed.logs || [],\n data: segment.buffer\n }, [segment.buffer]);\n }\n probeMp4StartTime({\n timescales,\n data\n }) {\n const startTime = probe$2.startTime(timescales, data);\n this.self.postMessage({\n action: 'probeMp4StartTime',\n startTime,\n data\n }, [data.buffer]);\n }\n probeMp4Tracks({\n data\n }) {\n const tracks = probe$2.tracks(data);\n this.self.postMessage({\n action: 'probeMp4Tracks',\n tracks,\n data\n }, [data.buffer]);\n }\n /**\n * Probes an mp4 segment for EMSG boxes containing ID3 data.\n * https://aomediacodec.github.io/id3-emsg/\n *\n * @param {Uint8Array} data segment data\n * @param {number} offset segment start time\n * @return {Object[]} an array of ID3 frames\n */\n\n probeEmsgID3({\n data,\n offset\n }) {\n const id3Frames = probe$2.getEmsgID3(data, offset);\n this.self.postMessage({\n action: 'probeEmsgID3',\n id3Frames,\n emsgData: data\n }, [data.buffer]);\n }\n /**\n * Probe an mpeg2-ts segment to determine the start time of the segment in it's\n * internal \"media time,\" as well as whether it contains video and/or audio.\n *\n * @private\n * @param {Uint8Array} bytes - segment bytes\n * @param {number} baseStartTime\n * Relative reference timestamp used when adjusting frame timestamps for rollover.\n * This value should be in seconds, as it's converted to a 90khz clock within the\n * function body.\n * @return {Object} The start time of the current segment in \"media time\" as well as\n * whether it contains video and/or audio\n */\n\n probeTs({\n data,\n baseStartTime\n }) {\n const tsStartTime = typeof baseStartTime === 'number' && !isNaN(baseStartTime) ? 
baseStartTime * clock$2.ONE_SECOND_IN_TS : void 0;\n const timeInfo = tsInspector.inspect(data, tsStartTime);\n let result = null;\n if (timeInfo) {\n result = {\n // each type's time info comes back as an array of 2 times, start and end\n hasVideo: timeInfo.video && timeInfo.video.length === 2 || false,\n hasAudio: timeInfo.audio && timeInfo.audio.length === 2 || false\n };\n if (result.hasVideo) {\n result.videoStart = timeInfo.video[0].ptsTime;\n }\n if (result.hasAudio) {\n result.audioStart = timeInfo.audio[0].ptsTime;\n }\n }\n this.self.postMessage({\n action: 'probeTs',\n result,\n data\n }, [data.buffer]);\n }\n clearAllMp4Captions() {\n if (this.captionParser) {\n this.captionParser.clearAllCaptions();\n }\n }\n clearParsedMp4Captions() {\n if (this.captionParser) {\n this.captionParser.clearParsedCaptions();\n }\n }\n /**\n * Adds data (a ts segment) to the start of the transmuxer pipeline for\n * processing.\n *\n * @param {ArrayBuffer} data data to push into the muxer\n */\n\n push(data) {\n // Cast array buffer to correct type for transmuxer\n const segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);\n this.transmuxer.push(segment);\n }\n /**\n * Recreate the transmuxer so that the next segment added via `push`\n * start with a fresh transmuxer.\n */\n\n reset() {\n this.transmuxer.reset();\n }\n /**\n * Set the value that will be used as the `baseMediaDecodeTime` time for the\n * next segment pushed in. Subsequent segments will have their `baseMediaDecodeTime`\n * set relative to the first based on the PTS values.\n *\n * @param {Object} data used to set the timestamp offset in the muxer\n */\n\n setTimestampOffset(data) {\n const timestampOffset = data.timestampOffset || 0;\n this.transmuxer.setBaseMediaDecodeTime(Math.round(clock$2.secondsToVideoTs(timestampOffset)));\n }\n setAudioAppendStart(data) {\n this.transmuxer.setAudioAppendStart(Math.ceil(clock$2.secondsToVideoTs(data.appendStart)));\n }\n setRemux(data) {\n this.transmuxer.setRemux(data.remux);\n }\n /**\n * Forces the pipeline to finish processing the last segment and emit it's\n * results.\n *\n * @param {Object} data event data, not really used\n */\n\n flush(data) {\n this.transmuxer.flush(); // transmuxed done action is fired after both audio/video pipelines are flushed\n\n self.postMessage({\n action: 'done',\n type: 'transmuxed'\n });\n }\n endTimeline() {\n this.transmuxer.endTimeline(); // transmuxed endedtimeline action is fired after both audio/video pipelines end their\n // timelines\n\n self.postMessage({\n action: 'endedtimeline',\n type: 'transmuxed'\n });\n }\n alignGopsWith(data) {\n this.transmuxer.alignGopsWith(data.gopsToAlignWith.slice());\n }\n }\n /**\n * Our web worker interface so that things can talk to mux.js\n * that will be running in a web worker. 
the scope is passed to this by\n * webworkify.\n *\n * @param {Object} self the scope for the web worker\n */\n\n self.onmessage = function (event) {\n if (event.data.action === 'init' && event.data.options) {\n this.messageHandlers = new MessageHandlers(self, event.data.options);\n return;\n }\n if (!this.messageHandlers) {\n this.messageHandlers = new MessageHandlers(self);\n }\n if (event.data && event.data.action && event.data.action !== 'init') {\n if (this.messageHandlers[event.data.action]) {\n this.messageHandlers[event.data.action](event.data);\n }\n }\n };\n}));\nvar TransmuxWorker = factory(workerCode$1);\n/* rollup-plugin-worker-factory end for worker!/home/runner/work/http-streaming/http-streaming/src/transmuxer-worker.js */\n\nconst handleData_ = (event, transmuxedData, callback) => {\n const {\n type,\n initSegment,\n captions,\n captionStreams,\n metadata,\n videoFrameDtsTime,\n videoFramePtsTime\n } = event.data.segment;\n transmuxedData.buffer.push({\n captions,\n captionStreams,\n metadata\n });\n const boxes = event.data.segment.boxes || {\n data: event.data.segment.data\n };\n const result = {\n type,\n // cast ArrayBuffer to TypedArray\n data: new Uint8Array(boxes.data, boxes.data.byteOffset, boxes.data.byteLength),\n initSegment: new Uint8Array(initSegment.data, initSegment.byteOffset, initSegment.byteLength)\n };\n if (typeof videoFrameDtsTime !== 'undefined') {\n result.videoFrameDtsTime = videoFrameDtsTime;\n }\n if (typeof videoFramePtsTime !== 'undefined') {\n result.videoFramePtsTime = videoFramePtsTime;\n }\n callback(result);\n};\nconst handleDone_ = ({\n transmuxedData,\n callback\n}) => {\n // Previously we only returned data on data events,\n // not on done events. Clear out the buffer to keep that consistent.\n transmuxedData.buffer = []; // all buffers should have been flushed from the muxer, so start processing anything we\n // have received\n\n callback(transmuxedData);\n};\nconst handleGopInfo_ = (event, transmuxedData) => {\n transmuxedData.gopInfo = event.data.gopInfo;\n};\nconst processTransmux = options => {\n const {\n transmuxer,\n bytes,\n audioAppendStart,\n gopsToAlignWith,\n remux,\n onData,\n onTrackInfo,\n onAudioTimingInfo,\n onVideoTimingInfo,\n onVideoSegmentTimingInfo,\n onAudioSegmentTimingInfo,\n onId3,\n onCaptions,\n onDone,\n onEndedTimeline,\n onTransmuxerLog,\n isEndOfTimeline,\n segment,\n triggerSegmentEventFn\n } = options;\n const transmuxedData = {\n buffer: []\n };\n let waitForEndedTimelineEvent = isEndOfTimeline;\n const handleMessage = event => {\n if (transmuxer.currentTransmux !== options) {\n // disposed\n return;\n }\n if (event.data.action === 'data') {\n handleData_(event, transmuxedData, onData);\n }\n if (event.data.action === 'trackinfo') {\n onTrackInfo(event.data.trackInfo);\n }\n if (event.data.action === 'gopInfo') {\n handleGopInfo_(event, transmuxedData);\n }\n if (event.data.action === 'audioTimingInfo') {\n onAudioTimingInfo(event.data.audioTimingInfo);\n }\n if (event.data.action === 'videoTimingInfo') {\n onVideoTimingInfo(event.data.videoTimingInfo);\n }\n if (event.data.action === 'videoSegmentTimingInfo') {\n onVideoSegmentTimingInfo(event.data.videoSegmentTimingInfo);\n }\n if (event.data.action === 'audioSegmentTimingInfo') {\n onAudioSegmentTimingInfo(event.data.audioSegmentTimingInfo);\n }\n if (event.data.action === 'id3Frame') {\n onId3([event.data.id3Frame], event.data.id3Frame.dispatchType);\n }\n if (event.data.action === 'caption') {\n onCaptions(event.data.caption);\n }\n if 
(event.data.action === 'endedtimeline') {\n waitForEndedTimelineEvent = false;\n onEndedTimeline();\n }\n if (event.data.action === 'log') {\n onTransmuxerLog(event.data.log);\n } // wait for the transmuxed event since we may have audio and video\n\n if (event.data.type !== 'transmuxed') {\n return;\n } // If the \"endedtimeline\" event has not yet fired, and this segment represents the end\n // of a timeline, that means there may still be data events before the segment\n // processing can be considerred complete. In that case, the final event should be\n // an \"endedtimeline\" event with the type \"transmuxed.\"\n\n if (waitForEndedTimelineEvent) {\n return;\n }\n transmuxer.onmessage = null;\n handleDone_({\n transmuxedData,\n callback: onDone\n });\n /* eslint-disable no-use-before-define */\n\n dequeue(transmuxer);\n /* eslint-enable */\n };\n const handleError = () => {\n const error = {\n message: 'Received an error message from the transmuxer worker',\n metadata: {\n errorType: videojs.Error.StreamingFailedToTransmuxSegment,\n segmentInfo: segmentInfoPayload({\n segment\n })\n }\n };\n onDone(null, error);\n };\n transmuxer.onmessage = handleMessage;\n transmuxer.onerror = handleError;\n if (audioAppendStart) {\n transmuxer.postMessage({\n action: 'setAudioAppendStart',\n appendStart: audioAppendStart\n });\n } // allow empty arrays to be passed to clear out GOPs\n\n if (Array.isArray(gopsToAlignWith)) {\n transmuxer.postMessage({\n action: 'alignGopsWith',\n gopsToAlignWith\n });\n }\n if (typeof remux !== 'undefined') {\n transmuxer.postMessage({\n action: 'setRemux',\n remux\n });\n }\n if (bytes.byteLength) {\n const buffer = bytes instanceof ArrayBuffer ? bytes : bytes.buffer;\n const byteOffset = bytes instanceof ArrayBuffer ? 0 : bytes.byteOffset;\n triggerSegmentEventFn({\n type: 'segmenttransmuxingstart',\n segment\n });\n transmuxer.postMessage({\n action: 'push',\n // Send the typed-array of data as an ArrayBuffer so that\n // it can be sent as a \"Transferable\" and avoid the costly\n // memory copy\n data: buffer,\n // To recreate the original typed-array, we need information\n // about what portion of the ArrayBuffer it was a view into\n byteOffset,\n byteLength: bytes.byteLength\n }, [buffer]);\n }\n if (isEndOfTimeline) {\n transmuxer.postMessage({\n action: 'endTimeline'\n });\n } // even if we didn't push any bytes, we have to make sure we flush in case we reached\n // the end of the segment\n\n transmuxer.postMessage({\n action: 'flush'\n });\n};\nconst dequeue = transmuxer => {\n transmuxer.currentTransmux = null;\n if (transmuxer.transmuxQueue.length) {\n transmuxer.currentTransmux = transmuxer.transmuxQueue.shift();\n if (typeof transmuxer.currentTransmux === 'function') {\n transmuxer.currentTransmux();\n } else {\n processTransmux(transmuxer.currentTransmux);\n }\n }\n};\nconst processAction = (transmuxer, action) => {\n transmuxer.postMessage({\n action\n });\n dequeue(transmuxer);\n};\nconst enqueueAction = (action, transmuxer) => {\n if (!transmuxer.currentTransmux) {\n transmuxer.currentTransmux = action;\n processAction(transmuxer, action);\n return;\n }\n transmuxer.transmuxQueue.push(processAction.bind(null, transmuxer, action));\n};\nconst reset = transmuxer => {\n enqueueAction('reset', transmuxer);\n};\nconst endTimeline = transmuxer => {\n enqueueAction('endTimeline', transmuxer);\n};\nconst transmux = options => {\n if (!options.transmuxer.currentTransmux) {\n options.transmuxer.currentTransmux = options;\n processTransmux(options);\n return;\n 
}\n options.transmuxer.transmuxQueue.push(options);\n};\nconst createTransmuxer = options => {\n const transmuxer = new TransmuxWorker();\n transmuxer.currentTransmux = null;\n transmuxer.transmuxQueue = [];\n const term = transmuxer.terminate;\n transmuxer.terminate = () => {\n transmuxer.currentTransmux = null;\n transmuxer.transmuxQueue.length = 0;\n return term.call(transmuxer);\n };\n transmuxer.postMessage({\n action: 'init',\n options\n });\n return transmuxer;\n};\nvar segmentTransmuxer = {\n reset,\n endTimeline,\n transmux,\n createTransmuxer\n};\nconst workerCallback = function (options) {\n const transmuxer = options.transmuxer;\n const endAction = options.endAction || options.action;\n const callback = options.callback;\n const message = _extends({}, options, {\n endAction: null,\n transmuxer: null,\n callback: null\n });\n const listenForEndEvent = event => {\n if (event.data.action !== endAction) {\n return;\n }\n transmuxer.removeEventListener('message', listenForEndEvent); // transfer ownership of bytes back to us.\n\n if (event.data.data) {\n event.data.data = new Uint8Array(event.data.data, options.byteOffset || 0, options.byteLength || event.data.data.byteLength);\n if (options.data) {\n options.data = event.data.data;\n }\n }\n callback(event.data);\n };\n transmuxer.addEventListener('message', listenForEndEvent);\n if (options.data) {\n const isArrayBuffer = options.data instanceof ArrayBuffer;\n message.byteOffset = isArrayBuffer ? 0 : options.data.byteOffset;\n message.byteLength = options.data.byteLength;\n const transfers = [isArrayBuffer ? options.data : options.data.buffer];\n transmuxer.postMessage(message, transfers);\n } else {\n transmuxer.postMessage(message);\n }\n};\nconst REQUEST_ERRORS = {\n FAILURE: 2,\n TIMEOUT: -101,\n ABORTED: -102\n};\n/**\n * Abort all requests\n *\n * @param {Object} activeXhrs - an object that tracks all XHR requests\n */\n\nconst abortAll = activeXhrs => {\n activeXhrs.forEach(xhr => {\n xhr.abort();\n });\n};\n/**\n * Gather important bandwidth stats once a request has completed\n *\n * @param {Object} request - the XHR request from which to gather stats\n */\n\nconst getRequestStats = request => {\n return {\n bandwidth: request.bandwidth,\n bytesReceived: request.bytesReceived || 0,\n roundTripTime: request.roundTripTime || 0\n };\n};\n/**\n * If possible gather bandwidth stats as a request is in\n * progress\n *\n * @param {Event} progressEvent - an event object from an XHR's progress event\n */\n\nconst getProgressStats = progressEvent => {\n const request = progressEvent.target;\n const roundTripTime = Date.now() - request.requestTime;\n const stats = {\n bandwidth: Infinity,\n bytesReceived: 0,\n roundTripTime: roundTripTime || 0\n };\n stats.bytesReceived = progressEvent.loaded; // This can result in Infinity if stats.roundTripTime is 0 but that is ok\n // because we should only use bandwidth stats on progress to determine when to\n // abort a request early due to insufficient bandwidth\n\n stats.bandwidth = Math.floor(stats.bytesReceived / stats.roundTripTime * 8 * 1000);\n return stats;\n};\n/**\n * Handle all error conditions in one place and return an object\n * with all the information\n *\n * @param {Error|null} error - if non-null signals an error occurred with the XHR\n * @param {Object} request - the XHR request that possibly generated the error\n */\n\nconst handleErrors = (error, request) => {\n const {\n requestType\n } = request;\n const metadata = getStreamingNetworkErrorMetadata({\n requestType,\n request,\n 
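// error may be null; the timeout and abort cases are read off the xhr below\n 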
error\n });\n if (request.timedout) {\n return {\n status: request.status,\n message: 'HLS request timed-out at URL: ' + request.uri,\n code: REQUEST_ERRORS.TIMEOUT,\n xhr: request,\n metadata\n };\n }\n if (request.aborted) {\n return {\n status: request.status,\n message: 'HLS request aborted at URL: ' + request.uri,\n code: REQUEST_ERRORS.ABORTED,\n xhr: request,\n metadata\n };\n }\n if (error) {\n return {\n status: request.status,\n message: 'HLS request errored at URL: ' + request.uri,\n code: REQUEST_ERRORS.FAILURE,\n xhr: request,\n metadata\n };\n }\n if (request.responseType === 'arraybuffer' && request.response.byteLength === 0) {\n return {\n status: request.status,\n message: 'Empty HLS response at URL: ' + request.uri,\n code: REQUEST_ERRORS.FAILURE,\n xhr: request,\n metadata\n };\n }\n return null;\n};\n/**\n * Handle responses for key data and convert the key data to the correct format\n * for the decryption step later\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Array} objects - objects to add the key bytes to.\n * @param {Function} finishProcessingFn - a callback to execute to continue processing\n * this request\n */\n\nconst handleKeyResponse = (segment, objects, finishProcessingFn, triggerSegmentEventFn) => (error, request) => {\n const response = request.response;\n const errorObj = handleErrors(error, request);\n if (errorObj) {\n return finishProcessingFn(errorObj, segment);\n }\n if (response.byteLength !== 16) {\n return finishProcessingFn({\n status: request.status,\n message: 'Invalid HLS key at URL: ' + request.uri,\n code: REQUEST_ERRORS.FAILURE,\n xhr: request\n }, segment);\n }\n const view = new DataView(response);\n const bytes = new Uint32Array([view.getUint32(0), view.getUint32(4), view.getUint32(8), view.getUint32(12)]);\n for (let i = 0; i < objects.length; i++) {\n objects[i].bytes = bytes;\n }\n const keyInfo = {\n uri: request.uri\n };\n triggerSegmentEventFn({\n type: 'segmentkeyloadcomplete',\n segment,\n keyInfo\n });\n return finishProcessingFn(null, segment);\n};\nconst parseInitSegment = (segment, callback) => {\n const type = detectContainerForBytes(segment.map.bytes); // TODO: We should also handle ts init segments here, but we\n // only know how to parse mp4 init segments at the moment\n\n if (type !== 'mp4') {\n const uri = segment.map.resolvedUri || segment.map.uri;\n const mediaType = type || 'unknown';\n return callback({\n internal: true,\n message: `Found unsupported ${mediaType} container for initialization segment at URL: ${uri}`,\n code: REQUEST_ERRORS.FAILURE,\n metadata: {\n mediaType\n }\n });\n }\n workerCallback({\n action: 'probeMp4Tracks',\n data: segment.map.bytes,\n transmuxer: segment.transmuxer,\n callback: ({\n tracks,\n data\n }) => {\n // transfer bytes back to us\n segment.map.bytes = data;\n tracks.forEach(function (track) {\n segment.map.tracks = segment.map.tracks || {}; // only support one track of each type for now\n\n if (segment.map.tracks[track.type]) {\n return;\n }\n segment.map.tracks[track.type] = track;\n if (typeof track.id === 'number' && track.timescale) {\n segment.map.timescales = segment.map.timescales || {};\n segment.map.timescales[track.id] = track.timescale;\n }\n });\n return callback(null);\n }\n });\n};\n/**\n * Handle init-segment responses\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} finishProcessingFn - a callback to execute to continue 
processing\n * this request\n */\n\nconst handleInitSegmentResponse = ({\n segment,\n finishProcessingFn,\n triggerSegmentEventFn\n}) => (error, request) => {\n const errorObj = handleErrors(error, request);\n if (errorObj) {\n return finishProcessingFn(errorObj, segment);\n }\n const bytes = new Uint8Array(request.response);\n triggerSegmentEventFn({\n type: 'segmentloaded',\n segment\n }); // init segment is encrypted, we will have to wait\n // until the key request is done to decrypt.\n\n if (segment.map.key) {\n segment.map.encryptedBytes = bytes;\n return finishProcessingFn(null, segment);\n }\n segment.map.bytes = bytes;\n parseInitSegment(segment, function (parseError) {\n if (parseError) {\n parseError.xhr = request;\n parseError.status = request.status;\n return finishProcessingFn(parseError, segment);\n }\n finishProcessingFn(null, segment);\n });\n};\n/**\n * Response handler for segment-requests being sure to set the correct\n * property depending on whether the segment is encrypted or not\n * Also records and keeps track of stats that are used for ABR purposes\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} finishProcessingFn - a callback to execute to continue processing\n * this request\n */\n\nconst handleSegmentResponse = ({\n segment,\n finishProcessingFn,\n responseType,\n triggerSegmentEventFn\n}) => (error, request) => {\n const errorObj = handleErrors(error, request);\n if (errorObj) {\n return finishProcessingFn(errorObj, segment);\n }\n triggerSegmentEventFn({\n type: 'segmentloaded',\n segment\n });\n const newBytes =\n // although responseText \"should\" exist, this guard serves to prevent an error being\n // thrown for two primary cases:\n // 1. the mime type override stops working, or is not implemented for a specific\n // browser\n // 2. when using mock XHR libraries like sinon that do not allow the override behavior\n responseType === 'arraybuffer' || !request.responseText ? request.response : stringToArrayBuffer(request.responseText.substring(segment.lastReachedChar || 0));\n segment.stats = getRequestStats(request);\n if (segment.key) {\n segment.encryptedBytes = new Uint8Array(newBytes);\n } else {\n segment.bytes = new Uint8Array(newBytes);\n }\n return finishProcessingFn(null, segment);\n};\nconst transmuxAndNotify = ({\n segment,\n bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n}) => {\n const fmp4Tracks = segment.map && segment.map.tracks || {};\n const isMuxed = Boolean(fmp4Tracks.audio && fmp4Tracks.video); // Keep references to each function so we can null them out after we're done with them.\n // One reason for this is that in the case of full segments, we want to trust start\n // times from the probe, rather than the transmuxer.\n\n let audioStartFn = timingInfoFn.bind(null, segment, 'audio', 'start');\n const audioEndFn = timingInfoFn.bind(null, segment, 'audio', 'end');\n let videoStartFn = timingInfoFn.bind(null, segment, 'video', 'start');\n const videoEndFn = timingInfoFn.bind(null, segment, 'video', 'end');\n const finish = () => transmux({\n bytes,\n transmuxer: segment.transmuxer,\n audioAppendStart: segment.audioAppendStart,\n gopsToAlignWith: segment.gopsToAlignWith,\n remux: isMuxed,\n onData: result => {\n result.type = result.type === 'combined' ? 
'video' : result.type;\n dataFn(segment, result);\n },\n onTrackInfo: trackInfo => {\n if (trackInfoFn) {\n if (isMuxed) {\n trackInfo.isMuxed = true;\n }\n trackInfoFn(segment, trackInfo);\n }\n },\n onAudioTimingInfo: audioTimingInfo => {\n // we only want the first start value we encounter\n if (audioStartFn && typeof audioTimingInfo.start !== 'undefined') {\n audioStartFn(audioTimingInfo.start);\n audioStartFn = null;\n } // we want to continually update the end time\n\n if (audioEndFn && typeof audioTimingInfo.end !== 'undefined') {\n audioEndFn(audioTimingInfo.end);\n }\n },\n onVideoTimingInfo: videoTimingInfo => {\n // we only want the first start value we encounter\n if (videoStartFn && typeof videoTimingInfo.start !== 'undefined') {\n videoStartFn(videoTimingInfo.start);\n videoStartFn = null;\n } // we want to continually update the end time\n\n if (videoEndFn && typeof videoTimingInfo.end !== 'undefined') {\n videoEndFn(videoTimingInfo.end);\n }\n },\n onVideoSegmentTimingInfo: videoSegmentTimingInfo => {\n const timingInfo = {\n pts: {\n start: videoSegmentTimingInfo.start.presentation,\n end: videoSegmentTimingInfo.end.presentation\n },\n dts: {\n start: videoSegmentTimingInfo.start.decode,\n end: videoSegmentTimingInfo.end.decode\n }\n };\n triggerSegmentEventFn({\n type: 'segmenttransmuxingtiminginfoavailable',\n segment,\n timingInfo\n });\n videoSegmentTimingInfoFn(videoSegmentTimingInfo);\n },\n onAudioSegmentTimingInfo: audioSegmentTimingInfo => {\n const timingInfo = {\n pts: {\n start: audioSegmentTimingInfo.start.pts,\n end: audioSegmentTimingInfo.end.pts\n },\n dts: {\n start: audioSegmentTimingInfo.start.dts,\n end: audioSegmentTimingInfo.end.dts\n }\n };\n triggerSegmentEventFn({\n type: 'segmenttransmuxingtiminginfoavailable',\n segment,\n timingInfo\n });\n audioSegmentTimingInfoFn(audioSegmentTimingInfo);\n },\n onId3: (id3Frames, dispatchType) => {\n id3Fn(segment, id3Frames, dispatchType);\n },\n onCaptions: captions => {\n captionsFn(segment, [captions]);\n },\n isEndOfTimeline,\n onEndedTimeline: () => {\n endedTimelineFn();\n },\n onTransmuxerLog,\n onDone: (result, error) => {\n if (!doneFn) {\n return;\n }\n result.type = result.type === 'combined' ? 'video' : result.type;\n triggerSegmentEventFn({\n type: 'segmenttransmuxingcomplete',\n segment\n });\n doneFn(error, segment, result);\n },\n segment,\n triggerSegmentEventFn\n }); // In the transmuxer, we don't yet have the ability to extract a \"proper\" start time.\n // Meaning cached frame data may corrupt our notion of where this segment\n // really starts. To get around this, probe for the info needed.\n\n workerCallback({\n action: 'probeTs',\n transmuxer: segment.transmuxer,\n data: bytes,\n baseStartTime: segment.baseStartTime,\n callback: data => {\n segment.bytes = bytes = data.data;\n const probeResult = data.result;\n if (probeResult) {\n trackInfoFn(segment, {\n hasAudio: probeResult.hasAudio,\n hasVideo: probeResult.hasVideo,\n isMuxed\n });\n trackInfoFn = null;\n }\n finish();\n }\n });\n};\nconst handleSegmentBytes = ({\n segment,\n bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n}) => {\n let bytesAsUint8Array = new Uint8Array(bytes); // TODO:\n // We should have a handler that fetches the number of bytes required\n // to check if something is fmp4. 
This will allow us to save bandwidth\n // because we can only exclude a playlist and abort requests\n // by codec after trackinfo triggers.\n\n if (isLikelyFmp4MediaSegment(bytesAsUint8Array)) {\n segment.isFmp4 = true;\n const {\n tracks\n } = segment.map;\n const trackInfo = {\n isFmp4: true,\n hasVideo: !!tracks.video,\n hasAudio: !!tracks.audio\n }; // if we have an audio track, with a codec that is not set to\n // encrypted audio\n\n if (tracks.audio && tracks.audio.codec && tracks.audio.codec !== 'enca') {\n trackInfo.audioCodec = tracks.audio.codec;\n } // if we have a video track, with a codec that is not set to\n // encrypted video\n\n if (tracks.video && tracks.video.codec && tracks.video.codec !== 'encv') {\n trackInfo.videoCodec = tracks.video.codec;\n }\n if (tracks.video && tracks.audio) {\n trackInfo.isMuxed = true;\n } // since we don't support appending fmp4 data on progress, we know we have the full\n // segment here\n\n trackInfoFn(segment, trackInfo); // The probe doesn't provide the segment end time, so only callback with the start\n // time. The end time can be roughly calculated by the receiver using the duration.\n //\n // Note that the start time returned by the probe reflects the baseMediaDecodeTime, as\n // that is the true start of the segment (where the playback engine should begin\n // decoding).\n\n const finishLoading = (captions, id3Frames) => {\n // if the track still has audio at this point it is only possible\n // for it to be audio only. See `tracks.video && tracks.audio` if statement\n // above.\n // we make sure to use segment.bytes here as that\n dataFn(segment, {\n data: bytesAsUint8Array,\n type: trackInfo.hasAudio && !trackInfo.isMuxed ? 'audio' : 'video'\n });\n if (id3Frames && id3Frames.length) {\n id3Fn(segment, id3Frames);\n }\n if (captions && captions.length) {\n captionsFn(segment, captions);\n }\n doneFn(null, segment, {});\n };\n workerCallback({\n action: 'probeMp4StartTime',\n timescales: segment.map.timescales,\n data: bytesAsUint8Array,\n transmuxer: segment.transmuxer,\n callback: ({\n data,\n startTime\n }) => {\n // transfer bytes back to us\n bytes = data.buffer;\n segment.bytes = bytesAsUint8Array = data;\n if (trackInfo.hasAudio && !trackInfo.isMuxed) {\n timingInfoFn(segment, 'audio', 'start', startTime);\n }\n if (trackInfo.hasVideo) {\n timingInfoFn(segment, 'video', 'start', startTime);\n }\n workerCallback({\n action: 'probeEmsgID3',\n data: bytesAsUint8Array,\n transmuxer: segment.transmuxer,\n offset: startTime,\n callback: ({\n emsgData,\n id3Frames\n }) => {\n // transfer bytes back to us\n bytes = emsgData.buffer;\n segment.bytes = bytesAsUint8Array = emsgData; // Run through the CaptionParser in case there are captions.\n // Initialize CaptionParser if it hasn't been yet\n\n if (!tracks.video || !emsgData.byteLength || !segment.transmuxer) {\n finishLoading(undefined, id3Frames);\n return;\n }\n workerCallback({\n action: 'pushMp4Captions',\n endAction: 'mp4Captions',\n transmuxer: segment.transmuxer,\n data: bytesAsUint8Array,\n timescales: segment.map.timescales,\n trackIds: [tracks.video.id],\n callback: message => {\n // transfer bytes back to us\n bytes = message.data.buffer;\n segment.bytes = bytesAsUint8Array = message.data;\n message.logs.forEach(function (log) {\n onTransmuxerLog(merge(log, {\n stream: 'mp4CaptionParser'\n }));\n });\n finishLoading(message.captions, id3Frames);\n }\n });\n }\n });\n }\n });\n return;\n } // VTT or other segments that don't need processing\n\n if (!segment.transmuxer) {\n 
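// no transmuxer attached, so there is nothing to transmux (e.g. WebVTT segments)\n 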
doneFn(null, segment, {});\n return;\n }\n if (typeof segment.container === 'undefined') {\n segment.container = detectContainerForBytes(bytesAsUint8Array);\n }\n if (segment.container !== 'ts' && segment.container !== 'aac') {\n trackInfoFn(segment, {\n hasAudio: false,\n hasVideo: false\n });\n doneFn(null, segment, {});\n return;\n } // ts or aac\n\n transmuxAndNotify({\n segment,\n bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n });\n};\nconst decrypt = function ({\n id,\n key,\n encryptedBytes,\n decryptionWorker,\n segment,\n doneFn\n}, callback) {\n const decryptionHandler = event => {\n if (event.data.source === id) {\n decryptionWorker.removeEventListener('message', decryptionHandler);\n const decrypted = event.data.decrypted;\n callback(new Uint8Array(decrypted.bytes, decrypted.byteOffset, decrypted.byteLength));\n }\n };\n decryptionWorker.onerror = () => {\n const message = 'An error occurred in the decryption worker';\n const segmentInfo = segmentInfoPayload({\n segment\n });\n const decryptError = {\n message,\n metadata: {\n error: new Error(message),\n errorType: videojs.Error.StreamingFailedToDecryptSegment,\n segmentInfo,\n keyInfo: {\n uri: segment.key.resolvedUri || segment.map.key.resolvedUri\n }\n }\n };\n doneFn(decryptError, segment);\n };\n decryptionWorker.addEventListener('message', decryptionHandler);\n let keyBytes;\n if (key.bytes.slice) {\n keyBytes = key.bytes.slice();\n } else {\n keyBytes = new Uint32Array(Array.prototype.slice.call(key.bytes));\n } // incrementally decrypt the bytes\n\n decryptionWorker.postMessage(createTransferableMessage({\n source: id,\n encrypted: encryptedBytes,\n key: keyBytes,\n iv: key.iv\n }), [encryptedBytes.buffer, keyBytes.buffer]);\n};\n/**\n * Decrypt the segment via the decryption web worker\n *\n * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128 decryption\n * routines\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that is executed when segment bytes are available\n * and ready to use\n * @param {Function} doneFn - a callback that is executed after decryption has completed\n */\n\nconst decryptSegment = ({\n decryptionWorker,\n segment,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n}) => {\n triggerSegmentEventFn({\n type: 'segmentdecryptionstart'\n });\n decrypt({\n id: segment.requestId,\n key: segment.key,\n encryptedBytes: segment.encryptedBytes,\n decryptionWorker,\n segment,\n 
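// decrypt() only calls doneFn directly when the decryption worker errors\n 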
doneFn\n }, decryptedBytes => {\n segment.bytes = decryptedBytes;\n triggerSegmentEventFn({\n type: 'segmentdecryptioncomplete',\n segment\n });\n handleSegmentBytes({\n segment,\n bytes: segment.bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n });\n });\n};\n/**\n * This function waits for all XHRs to finish (with either success or failure)\n * before continuing processing via its callback. The function gathers errors\n * from each request into a single errors array so that the error status for\n * each request can be examined later.\n *\n * @param {Object} activeXhrs - an object that tracks all XHR requests\n * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128 decryption\n * routines\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} id3Fn - a callback that receives ID3 metadata\n * @param {Function} captionsFn - a callback that receives captions\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that is executed when segment bytes are available\n * and ready to use\n * @param {Function} doneFn - a callback that is executed after all resources have been\n * downloaded and any decryption completed\n */\n\nconst waitForCompletion = ({\n activeXhrs,\n decryptionWorker,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n}) => {\n let count = 0;\n let didError = false;\n return (error, segment) => {\n if (didError) {\n return;\n }\n if (error) {\n didError = true; // If there are errors, we have to abort any outstanding requests\n\n abortAll(activeXhrs); // Even though the requests above are aborted, and in theory we could wait until we\n // handle the aborted events from those requests, there are some cases where we may\n // never get an aborted event. For instance, if the network connection is lost and\n // there were two requests, the first may have triggered an error immediately, while\n // the second request remains unsent. 
In that case, the aborted algorithm will not\n // trigger an abort: see https://xhr.spec.whatwg.org/#the-abort()-method\n //\n // We also can't rely on the ready state of the XHR, since the request that\n // triggered the connection error may also show as a ready state of 0 (unsent).\n // Therefore, we have to finish this group of requests immediately after the first\n // seen error.\n\n return doneFn(error, segment);\n }\n count += 1;\n if (count === activeXhrs.length) {\n const segmentFinish = function () {\n if (segment.encryptedBytes) {\n return decryptSegment({\n decryptionWorker,\n segment,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n });\n } // Otherwise, everything is ready just continue\n\n handleSegmentBytes({\n segment,\n bytes: segment.bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n });\n }; // Keep track of when *all* of the requests have completed\n\n segment.endOfAllRequests = Date.now();\n if (segment.map && segment.map.encryptedBytes && !segment.map.bytes) {\n triggerSegmentEventFn({\n type: 'segmentdecryptionstart',\n segment\n });\n return decrypt({\n decryptionWorker,\n // add -init to the \"id\" to differentiate between segment\n // and init segment decryption, just in case they happen\n // at the same time at some point in the future.\n id: segment.requestId + '-init',\n encryptedBytes: segment.map.encryptedBytes,\n key: segment.map.key,\n segment,\n doneFn\n }, decryptedBytes => {\n segment.map.bytes = decryptedBytes;\n triggerSegmentEventFn({\n type: 'segmentdecryptioncomplete',\n segment\n });\n parseInitSegment(segment, parseError => {\n if (parseError) {\n abortAll(activeXhrs);\n return doneFn(parseError, segment);\n }\n segmentFinish();\n });\n });\n }\n segmentFinish();\n }\n };\n};\n/**\n * Calls the abort callback if any request within the batch was aborted. 
Will only call\n * the callback once per batch of requests, even if multiple were aborted.\n *\n * @param {Object} loadendState - state to check to see if the abort function was called\n * @param {Function} abortFn - callback to call for abort\n */\n\nconst handleLoadEnd = ({\n loadendState,\n abortFn\n}) => event => {\n const request = event.target;\n if (request.aborted && abortFn && !loadendState.calledAbortFn) {\n abortFn();\n loadendState.calledAbortFn = true;\n }\n};\n/**\n * Simple progress event callback handler that gathers some stats before\n * executing a provided callback with the `segment` object\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} progressFn - a callback that is executed each time a progress event\n * is received\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that is executed when segment bytes are available\n * and ready to use\n * @param {Event} event - the progress event object from XMLHttpRequest\n */\n\nconst handleProgress = ({\n segment,\n progressFn,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn\n}) => event => {\n const request = event.target;\n if (request.aborted) {\n return;\n }\n segment.stats = merge(segment.stats, getProgressStats(event)); // record the time that we receive the first byte of data\n\n if (!segment.stats.firstBytesReceivedAt && segment.stats.bytesReceived) {\n segment.stats.firstBytesReceivedAt = Date.now();\n }\n return progressFn(event, segment);\n};\n/**\n * Load all resources and does any processing necessary for a media-segment\n *\n * Features:\n * decrypts the media-segment if it has a key uri and an iv\n * aborts *all* requests if *any* one request fails\n *\n * The segment object, at minimum, has the following format:\n * {\n * resolvedUri: String,\n * [transmuxer]: Object,\n * [byterange]: {\n * offset: Number,\n * length: Number\n * },\n * [key]: {\n * resolvedUri: String\n * [byterange]: {\n * offset: Number,\n * length: Number\n * },\n * iv: {\n * bytes: Uint32Array\n * }\n * },\n * [map]: {\n * resolvedUri: String,\n * [byterange]: {\n * offset: Number,\n * length: Number\n * },\n * [bytes]: Uint8Array\n * }\n * }\n * ...where [name] denotes optional properties\n *\n * @param {Function} xhr - an instance of the xhr wrapper in xhr.js\n * @param {Object} xhrOptions - the base options to provide to all xhr requests\n * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128\n * decryption routines\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} abortFn - a callback called (only once) if any piece of a request was\n * aborted\n * @param {Function} progressFn - a 
callback that receives progress events from the main\n * segment's xhr request\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} id3Fn - a callback that receives ID3 metadata\n * @param {Function} captionsFn - a callback that receives captions\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that receives data from the main segment's xhr\n * request, transmuxed if needed\n * @param {Function} doneFn - a callback that is executed only once all requests have\n * succeeded or failed\n * @return {Function} a function that, when invoked, immediately aborts all\n * outstanding requests\n */\n\nconst mediaSegmentRequest = ({\n xhr,\n xhrOptions,\n decryptionWorker,\n segment,\n abortFn,\n progressFn,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n}) => {\n const activeXhrs = [];\n const finishProcessingFn = waitForCompletion({\n activeXhrs,\n decryptionWorker,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog,\n triggerSegmentEventFn\n }); // optionally, request the decryption key\n\n if (segment.key && !segment.key.bytes) {\n const objects = [segment.key];\n if (segment.map && !segment.map.bytes && segment.map.key && segment.map.key.resolvedUri === segment.key.resolvedUri) {\n objects.push(segment.map.key);\n }\n const keyRequestOptions = merge(xhrOptions, {\n uri: segment.key.resolvedUri,\n responseType: 'arraybuffer',\n requestType: 'segment-key'\n });\n const keyRequestCallback = handleKeyResponse(segment, objects, finishProcessingFn, triggerSegmentEventFn);\n const keyInfo = {\n uri: segment.key.resolvedUri\n };\n triggerSegmentEventFn({\n type: 'segmentkeyloadstart',\n segment,\n keyInfo\n });\n const keyXhr = xhr(keyRequestOptions, keyRequestCallback);\n activeXhrs.push(keyXhr);\n } // optionally, request the associated media init segment\n\n if (segment.map && !segment.map.bytes) {\n const differentMapKey = segment.map.key && (!segment.key || segment.key.resolvedUri !== segment.map.key.resolvedUri);\n if (differentMapKey) {\n const mapKeyRequestOptions = merge(xhrOptions, {\n uri: segment.map.key.resolvedUri,\n responseType: 'arraybuffer',\n requestType: 'segment-key'\n });\n const mapKeyRequestCallback = handleKeyResponse(segment, [segment.map.key], finishProcessingFn, triggerSegmentEventFn);\n const keyInfo = {\n uri: segment.map.key.resolvedUri\n };\n triggerSegmentEventFn({\n type: 'segmentkeyloadstart',\n segment,\n keyInfo\n });\n const mapKeyXhr = xhr(mapKeyRequestOptions, mapKeyRequestCallback);\n activeXhrs.push(mapKeyXhr);\n }\n const initSegmentOptions = merge(xhrOptions, {\n uri: segment.map.resolvedUri,\n responseType: 
'arraybuffer',\n headers: segmentXhrHeaders(segment.map),\n requestType: 'segment-media-initialization'\n });\n const initSegmentRequestCallback = handleInitSegmentResponse({\n segment,\n finishProcessingFn,\n triggerSegmentEventFn\n });\n triggerSegmentEventFn({\n type: 'segmentloadstart',\n segment\n });\n const initSegmentXhr = xhr(initSegmentOptions, initSegmentRequestCallback);\n activeXhrs.push(initSegmentXhr);\n }\n const segmentRequestOptions = merge(xhrOptions, {\n uri: segment.part && segment.part.resolvedUri || segment.resolvedUri,\n responseType: 'arraybuffer',\n headers: segmentXhrHeaders(segment),\n requestType: 'segment'\n });\n const segmentRequestCallback = handleSegmentResponse({\n segment,\n finishProcessingFn,\n responseType: segmentRequestOptions.responseType,\n triggerSegmentEventFn\n });\n triggerSegmentEventFn({\n type: 'segmentloadstart',\n segment\n });\n const segmentXhr = xhr(segmentRequestOptions, segmentRequestCallback);\n segmentXhr.addEventListener('progress', handleProgress({\n segment,\n progressFn,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn\n }));\n activeXhrs.push(segmentXhr); // since all parts of the request must be considered, but should not make callbacks\n // multiple times, provide a shared state object\n\n const loadendState = {};\n activeXhrs.forEach(activeXhr => {\n activeXhr.addEventListener('loadend', handleLoadEnd({\n loadendState,\n abortFn\n }));\n });\n return () => abortAll(activeXhrs);\n};\n\n/**\n * @file - codecs.js - Handles tasks regarding codec strings such as translating them to\n * codec strings, or translating codec strings into objects that can be examined.\n */\nconst logFn$1 = logger('CodecUtils');\n/**\n * Returns a set of codec strings parsed from the playlist or the default\n * codec strings if no codecs were specified in the playlist\n *\n * @param {Playlist} media the current media playlist\n * @return {Object} an object with the video and audio codecs\n */\n\nconst getCodecs = function (media) {\n // if the codecs were explicitly specified, use them instead of the\n // defaults\n const mediaAttributes = media.attributes || {};\n if (mediaAttributes.CODECS) {\n return parseCodecs(mediaAttributes.CODECS);\n }\n};\nconst isMaat = (main, media) => {\n const mediaAttributes = media.attributes || {};\n return main && main.mediaGroups && main.mediaGroups.AUDIO && mediaAttributes.AUDIO && main.mediaGroups.AUDIO[mediaAttributes.AUDIO];\n};\nconst isMuxed = (main, media) => {\n if (!isMaat(main, media)) {\n return true;\n }\n const mediaAttributes = media.attributes || {};\n const audioGroup = main.mediaGroups.AUDIO[mediaAttributes.AUDIO];\n for (const groupId in audioGroup) {\n // If an audio group has a URI (the case for HLS, as HLS will use external playlists),\n // or there are listed playlists (the case for DASH, as the manifest will have already\n // provided all of the details necessary to generate the audio playlist, as opposed to\n // HLS' externally requested playlists), then the content is demuxed.\n if (!audioGroup[groupId].uri && !audioGroup[groupId].playlists) {\n return true;\n }\n }\n return false;\n};\nconst unwrapCodecList = function (codecList) {\n const codecs = {};\n codecList.forEach(({\n mediaType,\n type,\n details\n }) => {\n codecs[mediaType] = codecs[mediaType] || [];\n codecs[mediaType].push(translateLegacyCodec(`${type}${details}`));\n });\n Object.keys(codecs).forEach(function (mediaType) 
{\n if (codecs[mediaType].length > 1) {\n logFn$1(`multiple ${mediaType} codecs found as attributes: ${codecs[mediaType].join(', ')}. Setting playlist codecs to null so that we wait for mux.js to probe segments for real codecs.`);\n codecs[mediaType] = null;\n return;\n }\n codecs[mediaType] = codecs[mediaType][0];\n });\n return codecs;\n};\nconst codecCount = function (codecObj) {\n let count = 0;\n if (codecObj.audio) {\n count++;\n }\n if (codecObj.video) {\n count++;\n }\n return count;\n};\n/**\n * Calculates the codec strings for a working configuration of\n * SourceBuffers to play variant streams in a main playlist. If\n * there is no possible working configuration, an empty object will be\n * returned.\n *\n * @param main {Object} the m3u8 object for the main playlist\n * @param media {Object} the m3u8 object for the variant playlist\n * @return {Object} the codec strings.\n *\n * @private\n */\n\nconst codecsForPlaylist = function (main, media) {\n const mediaAttributes = media.attributes || {};\n const codecInfo = unwrapCodecList(getCodecs(media) || []); // HLS with multiple-audio tracks must always get an audio codec.\n // Put another way, there is no way to have a video-only multiple-audio HLS!\n\n if (isMaat(main, media) && !codecInfo.audio) {\n if (!isMuxed(main, media)) {\n // It is possible for codecs to be specified on the audio media group playlist but\n // not on the rendition playlist. This is mostly the case for DASH, where audio and\n // video are always separate (and separately specified).\n const defaultCodecs = unwrapCodecList(codecsFromDefault(main, mediaAttributes.AUDIO) || []);\n if (defaultCodecs.audio) {\n codecInfo.audio = defaultCodecs.audio;\n }\n }\n }\n return codecInfo;\n};\nconst logFn = logger('PlaylistSelector');\nconst representationToString = function (representation) {\n if (!representation || !representation.playlist) {\n return;\n }\n const playlist = representation.playlist;\n return JSON.stringify({\n id: playlist.id,\n bandwidth: representation.bandwidth,\n width: representation.width,\n height: representation.height,\n codecs: playlist.attributes && playlist.attributes.CODECS || ''\n });\n}; // Utilities\n\n/**\n * Returns the CSS value for the specified property on an element\n * using `getComputedStyle`. Firefox has a long-standing issue where\n * getComputedStyle() may return null when running in an iframe with\n * `display: none`.\n *\n * @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397\n * @param {HTMLElement} el the HTMLElement to work on\n * @param {string} the property to get the style for\n */\n\nconst safeGetComputedStyle = function (el, property) {\n if (!el) {\n return '';\n }\n const result = window$1.getComputedStyle(el);\n if (!result) {\n return '';\n }\n return result[property];\n};\n/**\n * Reusable stable sort function\n *\n * @param {Playlists} array\n * @param {Function} sortFn Different comparators\n * @function stableSort\n */\n\nconst stableSort = function (array, sortFn) {\n const newArray = array.slice();\n array.sort(function (left, right) {\n const cmp = sortFn(left, right);\n if (cmp === 0) {\n return newArray.indexOf(left) - newArray.indexOf(right);\n }\n return cmp;\n });\n};\n/**\n * A comparator function to sort two playlist objects by bandwidth.\n *\n * @param {Object} left a media playlist object\n * @param {Object} right a media playlist object\n * @return {number} Greater than zero if the bandwidth attribute of\n * left is greater than the corresponding attribute of right. 
Less\n * than zero if the bandwidth of right is greater than left and\n * exactly zero if the two are equal.\n */\n\nconst comparePlaylistBandwidth = function (left, right) {\n let leftBandwidth;\n let rightBandwidth;\n if (left.attributes.BANDWIDTH) {\n leftBandwidth = left.attributes.BANDWIDTH;\n }\n leftBandwidth = leftBandwidth || window$1.Number.MAX_VALUE;\n if (right.attributes.BANDWIDTH) {\n rightBandwidth = right.attributes.BANDWIDTH;\n }\n rightBandwidth = rightBandwidth || window$1.Number.MAX_VALUE;\n return leftBandwidth - rightBandwidth;\n};\n/**\n * A comparator function to sort two playlist objects by resolution (width).\n *\n * @param {Object} left a media playlist object\n * @param {Object} right a media playlist object\n * @return {number} Greater than zero if the resolution.width attribute of\n * left is greater than the corresponding attribute of right. Less\n * than zero if the resolution.width of right is greater than left and\n * exactly zero if the two are equal.\n */\n\nconst comparePlaylistResolution = function (left, right) {\n let leftWidth;\n let rightWidth;\n if (left.attributes.RESOLUTION && left.attributes.RESOLUTION.width) {\n leftWidth = left.attributes.RESOLUTION.width;\n }\n leftWidth = leftWidth || window$1.Number.MAX_VALUE;\n if (right.attributes.RESOLUTION && right.attributes.RESOLUTION.width) {\n rightWidth = right.attributes.RESOLUTION.width;\n }\n rightWidth = rightWidth || window$1.Number.MAX_VALUE; // NOTE - Fallback to bandwidth sort as appropriate in cases where multiple renditions\n // have the same media dimensions/resolution\n\n if (leftWidth === rightWidth && left.attributes.BANDWIDTH && right.attributes.BANDWIDTH) {\n return left.attributes.BANDWIDTH - right.attributes.BANDWIDTH;\n }\n return leftWidth - rightWidth;\n};\n/**\n * Chooses the appropriate media playlist based on bandwidth and player size\n *\n * @param {Object} main\n * Object representation of the main manifest\n * @param {number} playerBandwidth\n * Current calculated bandwidth of the player\n * @param {number} playerWidth\n * Current width of the player element (should account for the device pixel ratio)\n * @param {number} playerHeight\n * Current height of the player element (should account for the device pixel ratio)\n * @param {boolean} limitRenditionByPlayerDimensions\n * True if the player width and height should be used during the selection, false otherwise\n * @param {Object} playlistController\n * the current playlistController object\n * @return {Playlist} the highest bitrate playlist less than the\n * currently detected bandwidth, accounting for some amount of\n * bandwidth variance\n */\n\nlet simpleSelector = function (main, playerBandwidth, playerWidth, playerHeight, limitRenditionByPlayerDimensions, playlistController) {\n // If we end up getting called before `main` is available, exit early\n if (!main) {\n return;\n }\n const options = {\n bandwidth: playerBandwidth,\n width: playerWidth,\n height: playerHeight,\n limitRenditionByPlayerDimensions\n };\n let playlists = main.playlists; // if playlist is audio only, select between currently active audio group playlists.\n\n if (Playlist.isAudioOnly(main)) {\n playlists = playlistController.getAudioTrackPlaylists_(); // add audioOnly to options so that we log audioOnly: true\n // at the bottom of this function for debugging.\n\n options.audioOnly = true;\n } // convert the playlists to an intermediary representation to make comparisons easier\n\n let sortedPlaylistReps = playlists.map(playlist => {\n let 
bandwidth;\n const width = playlist.attributes && playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.width;\n const height = playlist.attributes && playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.height;\n bandwidth = playlist.attributes && playlist.attributes.BANDWIDTH;\n bandwidth = bandwidth || window$1.Number.MAX_VALUE;\n return {\n bandwidth,\n width,\n height,\n playlist\n };\n });\n stableSort(sortedPlaylistReps, (left, right) => left.bandwidth - right.bandwidth); // filter out any playlists that have been excluded due to\n // incompatible configurations\n\n sortedPlaylistReps = sortedPlaylistReps.filter(rep => !Playlist.isIncompatible(rep.playlist)); // filter out any playlists that have been disabled manually through the representations\n // api or excluded temporarily due to playback errors.\n\n let enabledPlaylistReps = sortedPlaylistReps.filter(rep => Playlist.isEnabled(rep.playlist));\n if (!enabledPlaylistReps.length) {\n // if there are no enabled playlists, then they have all been excluded or disabled\n // by the user through the representations api. In this case, ignore exclusion and\n // fallback to what the user wants by using playlists the user has not disabled.\n enabledPlaylistReps = sortedPlaylistReps.filter(rep => !Playlist.isDisabled(rep.playlist));\n } // filter out any variant that has greater effective bitrate\n // than the current estimated bandwidth\n\n const bandwidthPlaylistReps = enabledPlaylistReps.filter(rep => rep.bandwidth * Config.BANDWIDTH_VARIANCE < playerBandwidth);\n let highestRemainingBandwidthRep = bandwidthPlaylistReps[bandwidthPlaylistReps.length - 1]; // get all of the renditions with the same (highest) bandwidth\n // and then take the very first element\n\n const bandwidthBestRep = bandwidthPlaylistReps.filter(rep => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0]; // if we're not going to limit renditions by player size, make an early decision.\n\n if (limitRenditionByPlayerDimensions === false) {\n const chosenRep = bandwidthBestRep || enabledPlaylistReps[0] || sortedPlaylistReps[0];\n if (chosenRep && chosenRep.playlist) {\n let type = 'sortedPlaylistReps';\n if (bandwidthBestRep) {\n type = 'bandwidthBestRep';\n }\n if (enabledPlaylistReps[0]) {\n type = 'enabledPlaylistReps';\n }\n logFn(`choosing ${representationToString(chosenRep)} using ${type} with options`, options);\n return chosenRep.playlist;\n }\n logFn('could not choose a playlist with options', options);\n return null;\n } // filter out playlists without resolution information\n\n const haveResolution = bandwidthPlaylistReps.filter(rep => rep.width && rep.height); // sort variants by resolution\n\n stableSort(haveResolution, (left, right) => left.width - right.width); // if we have the exact resolution as the player use it\n\n const resolutionBestRepList = haveResolution.filter(rep => rep.width === playerWidth && rep.height === playerHeight);\n highestRemainingBandwidthRep = resolutionBestRepList[resolutionBestRepList.length - 1]; // ensure that we pick the highest bandwidth variant that has the exact resolution\n\n const resolutionBestRep = resolutionBestRepList.filter(rep => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];\n let resolutionPlusOneList;\n let resolutionPlusOneSmallest;\n let resolutionPlusOneRep; // find the smallest variant that is larger than the player\n // if there is no match of exact resolution\n\n if (!resolutionBestRep) {\n resolutionPlusOneList = haveResolution.filter(rep => rep.width > 
playerWidth || rep.height > playerHeight); // find all the variants that have the same smallest resolution\n\n resolutionPlusOneSmallest = resolutionPlusOneList.filter(rep => rep.width === resolutionPlusOneList[0].width && rep.height === resolutionPlusOneList[0].height); // ensure that we also pick the highest bandwidth variant that\n // is just-larger-than the video player\n\n highestRemainingBandwidthRep = resolutionPlusOneSmallest[resolutionPlusOneSmallest.length - 1];\n resolutionPlusOneRep = resolutionPlusOneSmallest.filter(rep => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];\n }\n let leastPixelDiffRep; // If this selector proves to be better than others,\n // resolutionPlusOneRep and resolutionBestRep and all\n // the code involving them should be removed.\n\n if (playlistController.leastPixelDiffSelector) {\n // find the variant that is closest to the player's pixel size\n const leastPixelDiffList = haveResolution.map(rep => {\n rep.pixelDiff = Math.abs(rep.width - playerWidth) + Math.abs(rep.height - playerHeight);\n return rep;\n }); // get the highest bandwidth, closest resolution playlist\n\n stableSort(leastPixelDiffList, (left, right) => {\n // sort by highest bandwidth if pixelDiff is the same\n if (left.pixelDiff === right.pixelDiff) {\n return right.bandwidth - left.bandwidth;\n }\n return left.pixelDiff - right.pixelDiff;\n });\n leastPixelDiffRep = leastPixelDiffList[0];\n } // fallback chain of variants\n\n const chosenRep = leastPixelDiffRep || resolutionPlusOneRep || resolutionBestRep || bandwidthBestRep || enabledPlaylistReps[0] || sortedPlaylistReps[0];\n if (chosenRep && chosenRep.playlist) {\n let type = 'sortedPlaylistReps';\n if (leastPixelDiffRep) {\n type = 'leastPixelDiffRep';\n } else if (resolutionPlusOneRep) {\n type = 'resolutionPlusOneRep';\n } else if (resolutionBestRep) {\n type = 'resolutionBestRep';\n } else if (bandwidthBestRep) {\n type = 'bandwidthBestRep';\n } else if (enabledPlaylistReps[0]) {\n type = 'enabledPlaylistReps';\n }\n logFn(`choosing ${representationToString(chosenRep)} using ${type} with options`, options);\n return chosenRep.playlist;\n }\n logFn('could not choose a playlist with options', options);\n return null;\n};\n\n/**\n * Chooses the appropriate media playlist based on the most recent\n * bandwidth estimate and the player size.\n *\n * Expects to be called within the context of an instance of VhsHandler\n *\n * @return {Playlist} the highest bitrate playlist less than the\n * currently detected bandwidth, accounting for some amount of\n * bandwidth variance\n */\n\nconst lastBandwidthSelector = function () {\n let pixelRatio = this.useDevicePixelRatio ? window$1.devicePixelRatio || 1 : 1;\n if (!isNaN(this.customPixelRatio)) {\n pixelRatio = this.customPixelRatio;\n }\n return simpleSelector(this.playlists.main, this.systemBandwidth, parseInt(safeGetComputedStyle(this.tech_.el(), 'width'), 10) * pixelRatio, parseInt(safeGetComputedStyle(this.tech_.el(), 'height'), 10) * pixelRatio, this.limitRenditionByPlayerDimensions, this.playlistController_);\n};\n/**\n * Chooses the appropriate media playlist based on an\n * exponential-weighted moving average of the bandwidth after\n * filtering for player size.\n *\n * Expects to be called within the context of an instance of VhsHandler\n *\n * @param {number} decay - a number between 0 and 1. 
Higher values of\n * this parameter will cause previous bandwidth estimates to lose\n * significance more quickly.\n * @return {Function} a function which can be invoked to create a new\n * playlist selector function.\n * @see https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average\n */\n\nconst movingAverageBandwidthSelector = function (decay) {\n let average = -1;\n let lastSystemBandwidth = -1;\n if (decay < 0 || decay > 1) {\n throw new Error('Moving average bandwidth decay must be between 0 and 1.');\n }\n return function () {\n let pixelRatio = this.useDevicePixelRatio ? window$1.devicePixelRatio || 1 : 1;\n if (!isNaN(this.customPixelRatio)) {\n pixelRatio = this.customPixelRatio;\n }\n if (average < 0) {\n average = this.systemBandwidth;\n lastSystemBandwidth = this.systemBandwidth;\n } // stop the average value from decaying for every 250ms\n // when the systemBandwidth is constant\n // and\n // stop average from setting to a very low value when the\n // systemBandwidth becomes 0 in case of chunk cancellation\n\n if (this.systemBandwidth > 0 && this.systemBandwidth !== lastSystemBandwidth) {\n average = decay * this.systemBandwidth + (1 - decay) * average;\n lastSystemBandwidth = this.systemBandwidth;\n }\n return simpleSelector(this.playlists.main, average, parseInt(safeGetComputedStyle(this.tech_.el(), 'width'), 10) * pixelRatio, parseInt(safeGetComputedStyle(this.tech_.el(), 'height'), 10) * pixelRatio, this.limitRenditionByPlayerDimensions, this.playlistController_);\n };\n};\n/**\n * Chooses the appropriate media playlist based on the potential to rebuffer\n *\n * @param {Object} settings\n * Object of information required to use this selector\n * @param {Object} settings.main\n * Object representation of the main manifest\n * @param {number} settings.currentTime\n * The current time of the player\n * @param {number} settings.bandwidth\n * Current measured bandwidth\n * @param {number} settings.duration\n * Duration of the media\n * @param {number} settings.segmentDuration\n * Segment duration to be used in round trip time calculations\n * @param {number} settings.timeUntilRebuffer\n * Time left in seconds until the player has to rebuffer\n * @param {number} settings.currentTimeline\n * The current timeline segments are being loaded from\n * @param {SyncController} settings.syncController\n * SyncController for determining if we have a sync point for a given playlist\n * @return {Object|null}\n * {Object} return.playlist\n * The highest bandwidth playlist with the least amount of rebuffering\n * {Number} return.rebufferingImpact\n * The amount of time in seconds switching to this playlist will rebuffer. 
A\n * negative value means that switching will cause zero rebuffering.\n */\n\nconst minRebufferMaxBandwidthSelector = function (settings) {\n const {\n main,\n currentTime,\n bandwidth,\n duration,\n segmentDuration,\n timeUntilRebuffer,\n currentTimeline,\n syncController\n } = settings; // filter out any playlists that have been excluded due to\n // incompatible configurations\n\n const compatiblePlaylists = main.playlists.filter(playlist => !Playlist.isIncompatible(playlist)); // filter out any playlists that have been disabled manually through the representations\n // api or excluded temporarily due to playback errors.\n\n let enabledPlaylists = compatiblePlaylists.filter(Playlist.isEnabled);\n if (!enabledPlaylists.length) {\n // if there are no enabled playlists, then they have all been excluded or disabled\n // by the user through the representations api. In this case, ignore exclusion and\n // fallback to what the user wants by using playlists the user has not disabled.\n enabledPlaylists = compatiblePlaylists.filter(playlist => !Playlist.isDisabled(playlist));\n }\n const bandwidthPlaylists = enabledPlaylists.filter(Playlist.hasAttribute.bind(null, 'BANDWIDTH'));\n const rebufferingEstimates = bandwidthPlaylists.map(playlist => {\n const syncPoint = syncController.getSyncPoint(playlist, duration, currentTimeline, currentTime); // If there is no sync point for this playlist, switching to it will require a\n // sync request first. This will double the request time\n\n const numRequests = syncPoint ? 1 : 2;\n const requestTimeEstimate = Playlist.estimateSegmentRequestTime(segmentDuration, bandwidth, playlist);\n const rebufferingImpact = requestTimeEstimate * numRequests - timeUntilRebuffer;\n return {\n playlist,\n rebufferingImpact\n };\n });\n const noRebufferingPlaylists = rebufferingEstimates.filter(estimate => estimate.rebufferingImpact <= 0); // Sort by bandwidth DESC\n\n stableSort(noRebufferingPlaylists, (a, b) => comparePlaylistBandwidth(b.playlist, a.playlist));\n if (noRebufferingPlaylists.length) {\n return noRebufferingPlaylists[0];\n }\n stableSort(rebufferingEstimates, (a, b) => a.rebufferingImpact - b.rebufferingImpact);\n return rebufferingEstimates[0] || null;\n};\n/**\n * Chooses the appropriate media playlist, which in this case is the lowest bitrate\n * one with video. If no renditions with video exist, return the lowest audio rendition.\n *\n * Expects to be called within the context of an instance of VhsHandler\n *\n * @return {Object|null}\n * {Object} return.playlist\n * The lowest bitrate playlist that contains a video codec. 
If no such rendition\n * exists, pick the lowest audio rendition.\n */\n\nconst lowestBitrateCompatibleVariantSelector = function () {\n // filter out any playlists that have been excluded due to\n // incompatible configurations or playback errors\n const playlists = this.playlists.main.playlists.filter(Playlist.isEnabled); // Sort ascending by bitrate\n\n stableSort(playlists, (a, b) => comparePlaylistBandwidth(a, b)); // Parse and assume that playlists with no video codec have no video\n // (this is not necessarily true, although it is generally true).\n //\n // If an entire manifest has no valid videos everything will get filtered\n // out.\n\n const playlistsWithVideo = playlists.filter(playlist => !!codecsForPlaylist(this.playlists.main, playlist).video);\n return playlistsWithVideo[0] || null;\n};\n\n/**\n * Combine all segments into a single Uint8Array\n *\n * @param {Object} segmentObj\n * @return {Uint8Array} concatenated bytes\n * @private\n */\nconst concatSegments = segmentObj => {\n let offset = 0;\n let tempBuffer;\n if (segmentObj.bytes) {\n tempBuffer = new Uint8Array(segmentObj.bytes); // combine the individual segments into one large typed-array\n\n segmentObj.segments.forEach(segment => {\n tempBuffer.set(segment, offset);\n offset += segment.byteLength;\n });\n }\n return tempBuffer;\n};\n/**\n * Example:\n * https://host.com/path1/path2/path3/segment.ts?arg1=val1\n * -->\n * path3/segment.ts\n *\n * @param resolvedUri\n * @return {string}\n */\n\nfunction compactSegmentUrlDescription(resolvedUri) {\n try {\n return new URL(resolvedUri).pathname.split('/').slice(-2).join('/');\n } catch (e) {\n return '';\n }\n}\n\n/**\n * @file text-tracks.js\n */\n/**\n * Create captions text tracks on video.js if they do not exist\n *\n * @param {Object} inbandTextTracks a reference to current inbandTextTracks\n * @param {Object} tech the video.js tech\n * @param {Object} captionStream the caption stream to create\n * @private\n */\n\nconst createCaptionsTrackIfNotExists = function (inbandTextTracks, tech, captionStream) {\n if (!inbandTextTracks[captionStream]) {\n tech.trigger({\n type: 'usage',\n name: 'vhs-608'\n });\n let instreamId = captionStream; // we need to translate SERVICEn for 708 to how mux.js currently labels them\n\n if (/^cc708_/.test(captionStream)) {\n instreamId = 'SERVICE' + captionStream.split('_')[1];\n }\n const track = tech.textTracks().getTrackById(instreamId);\n if (track) {\n // Reuse an existing track with a CC# id because this was\n // very likely created by videojs-contrib-hls from information\n // in the m3u8 for us to use\n inbandTextTracks[captionStream] = track;\n } else {\n // This section gets called when we have caption services that aren't specified in the manifest.\n // Manifest level caption services are handled in media-groups.js under CLOSED-CAPTIONS.\n const captionServices = tech.options_.vhs && tech.options_.vhs.captionServices || {};\n let label = captionStream;\n let language = captionStream;\n let def = false;\n const captionService = captionServices[instreamId];\n if (captionService) {\n label = captionService.label;\n language = captionService.language;\n def = captionService.default;\n } // Otherwise, create a track with the default `CC#` label and\n // without a language\n\n inbandTextTracks[captionStream] = tech.addRemoteTextTrack({\n kind: 'captions',\n id: instreamId,\n // TODO: investigate why this doesn't seem to turn the caption on by default\n default: def,\n label,\n language\n }, false).track;\n }\n }\n};\n/**\n * Add 
caption text track data to a source handler given an array of captions\n *\n * @param {Object}\n * @param {Object} inbandTextTracks the inband text tracks\n * @param {number} timestampOffset the timestamp offset of the source buffer\n * @param {Array} captionArray an array of caption data\n * @private\n */\n\nconst addCaptionData = function ({\n inbandTextTracks,\n captionArray,\n timestampOffset\n}) {\n if (!captionArray) {\n return;\n }\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n captionArray.forEach(caption => {\n const track = caption.stream; // in CEA 608 captions, video.js/mux.js sends a content array\n // with positioning data\n\n if (caption.content) {\n caption.content.forEach(value => {\n const cue = new Cue(caption.startTime + timestampOffset, caption.endTime + timestampOffset, value.text);\n cue.line = value.line;\n cue.align = 'left';\n cue.position = value.position;\n cue.positionAlign = 'line-left';\n inbandTextTracks[track].addCue(cue);\n });\n } else {\n // otherwise, a text value with combined captions is sent\n inbandTextTracks[track].addCue(new Cue(caption.startTime + timestampOffset, caption.endTime + timestampOffset, caption.text));\n }\n });\n};\n/**\n * Define properties on a cue for backwards compatibility,\n * but warn the user that the way that they are using it\n * is deprecated and will be removed at a later date.\n *\n * @param {Cue} cue the cue to add the properties on\n * @private\n */\n\nconst deprecateOldCue = function (cue) {\n Object.defineProperties(cue.frame, {\n id: {\n get() {\n videojs.log.warn('cue.frame.id is deprecated. Use cue.value.key instead.');\n return cue.value.key;\n }\n },\n value: {\n get() {\n videojs.log.warn('cue.frame.value is deprecated. Use cue.value.data instead.');\n return cue.value.data;\n }\n },\n privateData: {\n get() {\n videojs.log.warn('cue.frame.privateData is deprecated. 
Use cue.value.data instead.');\n return cue.value.data;\n }\n }\n });\n};\n/**\n * Add metadata text track data to a source handler given an array of metadata\n *\n * @param {Object}\n * @param {Object} inbandTextTracks the inband text tracks\n * @param {Array} metadataArray an array of metadata\n * @param {number} timestampOffset the timestamp offset of the source buffer\n * @param {number} videoDuration the duration of the video\n * @private\n */\n\nconst addMetadata = ({\n inbandTextTracks,\n metadataArray,\n timestampOffset,\n videoDuration\n}) => {\n if (!metadataArray) {\n return;\n }\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n const metadataTrack = inbandTextTracks.metadataTrack_;\n if (!metadataTrack) {\n return;\n }\n metadataArray.forEach(metadata => {\n const time = metadata.cueTime + timestampOffset; // if time isn't a finite number between 0 and Infinity, like NaN,\n // ignore this bit of metadata.\n // This likely occurs when you have a non-timed ID3 tag like TIT2,\n // which is the \"Title/Songname/Content description\" frame\n\n if (typeof time !== 'number' || window$1.isNaN(time) || time < 0 || !(time < Infinity)) {\n return;\n } // If we have no frames, we can't create a cue.\n\n if (!metadata.frames || !metadata.frames.length) {\n return;\n }\n metadata.frames.forEach(frame => {\n const cue = new Cue(time, time, frame.value || frame.url || frame.data || '');\n cue.frame = frame;\n cue.value = frame;\n deprecateOldCue(cue);\n metadataTrack.addCue(cue);\n });\n });\n if (!metadataTrack.cues || !metadataTrack.cues.length) {\n return;\n } // Updating the metadata cues so that\n // the endTime of each cue is the startTime of the next cue\n // the endTime of last cue is the duration of the video\n\n const cues = metadataTrack.cues;\n const cuesArray = []; // Create a copy of the TextTrackCueList...\n // ...disregarding cues with a falsey value\n\n for (let i = 0; i < cues.length; i++) {\n if (cues[i]) {\n cuesArray.push(cues[i]);\n }\n } // Group cues by their startTime value\n\n const cuesGroupedByStartTime = cuesArray.reduce((obj, cue) => {\n const timeSlot = obj[cue.startTime] || [];\n timeSlot.push(cue);\n obj[cue.startTime] = timeSlot;\n return obj;\n }, {}); // Sort startTimes by ascending order\n\n const sortedStartTimes = Object.keys(cuesGroupedByStartTime).sort((a, b) => Number(a) - Number(b)); // Map each cue group's endTime to the next group's startTime\n\n sortedStartTimes.forEach((startTime, idx) => {\n const cueGroup = cuesGroupedByStartTime[startTime];\n const finiteDuration = isFinite(videoDuration) ? 
videoDuration : startTime;\n const nextTime = Number(sortedStartTimes[idx + 1]) || finiteDuration; // Map each cue's endTime the next group's startTime\n\n cueGroup.forEach(cue => {\n cue.endTime = nextTime;\n });\n });\n}; // object for mapping daterange attributes\n\nconst dateRangeAttr = {\n id: 'ID',\n class: 'CLASS',\n startDate: 'START-DATE',\n duration: 'DURATION',\n endDate: 'END-DATE',\n endOnNext: 'END-ON-NEXT',\n plannedDuration: 'PLANNED-DURATION',\n scte35Out: 'SCTE35-OUT',\n scte35In: 'SCTE35-IN'\n};\nconst dateRangeKeysToOmit = new Set(['id', 'class', 'startDate', 'duration', 'endDate', 'endOnNext', 'startTime', 'endTime', 'processDateRange']);\n/**\n * Add DateRange metadata text track to a source handler given an array of metadata\n *\n * @param {Object}\n * @param {Object} inbandTextTracks the inband text tracks\n * @param {Array} dateRanges parsed media playlist\n * @private\n */\n\nconst addDateRangeMetadata = ({\n inbandTextTracks,\n dateRanges\n}) => {\n const metadataTrack = inbandTextTracks.metadataTrack_;\n if (!metadataTrack) {\n return;\n }\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n dateRanges.forEach(dateRange => {\n // we generate multiple cues for each date range with different attributes\n for (const key of Object.keys(dateRange)) {\n if (dateRangeKeysToOmit.has(key)) {\n continue;\n }\n const cue = new Cue(dateRange.startTime, dateRange.endTime, '');\n cue.id = dateRange.id;\n cue.type = 'com.apple.quicktime.HLS';\n cue.value = {\n key: dateRangeAttr[key],\n data: dateRange[key]\n };\n if (key === 'scte35Out' || key === 'scte35In') {\n cue.value.data = new Uint8Array(cue.value.data.match(/[\\da-f]{2}/gi)).buffer;\n }\n metadataTrack.addCue(cue);\n }\n dateRange.processDateRange();\n });\n};\n/**\n * Create metadata text track on video.js if it does not exist\n *\n * @param {Object} inbandTextTracks a reference to current inbandTextTracks\n * @param {string} dispatchType the inband metadata track dispatch type\n * @param {Object} tech the video.js tech\n * @private\n */\n\nconst createMetadataTrackIfNotExists = (inbandTextTracks, dispatchType, tech) => {\n if (inbandTextTracks.metadataTrack_) {\n return;\n }\n inbandTextTracks.metadataTrack_ = tech.addRemoteTextTrack({\n kind: 'metadata',\n label: 'Timed Metadata'\n }, false).track;\n if (!videojs.browser.IS_ANY_SAFARI) {\n inbandTextTracks.metadataTrack_.inBandMetadataTrackDispatchType = dispatchType;\n }\n};\n/**\n * Remove cues from a track on video.js.\n *\n * @param {Double} start start of where we should remove the cue\n * @param {Double} end end of where the we should remove the cue\n * @param {Object} track the text track to remove the cues from\n * @private\n */\n\nconst removeCuesFromTrack = function (start, end, track) {\n let i;\n let cue;\n if (!track) {\n return;\n }\n if (!track.cues) {\n return;\n }\n i = track.cues.length;\n while (i--) {\n cue = track.cues[i]; // Remove any cue within the provided start and end time\n\n if (cue.startTime >= start && cue.endTime <= end) {\n track.removeCue(cue);\n }\n }\n};\n/**\n * Remove duplicate cues from a track on video.js (a cue is considered a\n * duplicate if it has the same time interval and text as another)\n *\n * @param {Object} track the text track to remove the duplicate cues from\n * @private\n */\n\nconst removeDuplicateCuesFromTrack = function (track) {\n const cues = track.cues;\n if (!cues) {\n return;\n }\n const uniqueCues = {};\n for (let i = cues.length - 1; i >= 0; i--) {\n const cue = cues[i];\n const cueKey = 
`${cue.startTime}-${cue.endTime}-${cue.text}`;\n if (uniqueCues[cueKey]) {\n track.removeCue(cue);\n } else {\n uniqueCues[cueKey] = cue;\n }\n }\n};\n\n/**\n * Returns a list of gops in the buffer that have a pts value of 3 seconds or more in\n * front of current time.\n *\n * @param {Array} buffer\n * The current buffer of gop information\n * @param {number} currentTime\n * The current time\n * @param {Double} mapping\n * Offset to map display time to stream presentation time\n * @return {Array}\n * List of gops considered safe to append over\n */\n\nconst gopsSafeToAlignWith = (buffer, currentTime, mapping) => {\n if (typeof currentTime === 'undefined' || currentTime === null || !buffer.length) {\n return [];\n } // pts value for current time + 3 seconds to give a bit more wiggle room\n\n const currentTimePts = Math.ceil((currentTime - mapping + 3) * ONE_SECOND_IN_TS);\n let i;\n for (i = 0; i < buffer.length; i++) {\n if (buffer[i].pts > currentTimePts) {\n break;\n }\n }\n return buffer.slice(i);\n};\n/**\n * Appends gop information (timing and byteLength) received by the transmuxer for the\n * gops appended in the last call to appendBuffer\n *\n * @param {Array} buffer\n * The current buffer of gop information\n * @param {Array} gops\n * List of new gop information\n * @param {boolean} replace\n * If true, replace the buffer with the new gop information. If false, append the\n * new gop information to the buffer in the right location of time.\n * @return {Array}\n * Updated list of gop information\n */\n\nconst updateGopBuffer = (buffer, gops, replace) => {\n if (!gops.length) {\n return buffer;\n }\n if (replace) {\n // If we are in safe append mode, then completely overwrite the gop buffer\n // with the most recent appeneded data. This will make sure that when appending\n // future segments, we only try to align with gops that are both ahead of current\n // time and in the last segment appended.\n return gops.slice();\n }\n const start = gops[0].pts;\n let i = 0;\n for (i; i < buffer.length; i++) {\n if (buffer[i].pts >= start) {\n break;\n }\n }\n return buffer.slice(0, i).concat(gops);\n};\n/**\n * Removes gop information in buffer that overlaps with provided start and end\n *\n * @param {Array} buffer\n * The current buffer of gop information\n * @param {Double} start\n * position to start the remove at\n * @param {Double} end\n * position to end the remove at\n * @param {Double} mapping\n * Offset to map display time to stream presentation time\n */\n\nconst removeGopBuffer = (buffer, start, end, mapping) => {\n const startPts = Math.ceil((start - mapping) * ONE_SECOND_IN_TS);\n const endPts = Math.ceil((end - mapping) * ONE_SECOND_IN_TS);\n const updatedBuffer = buffer.slice();\n let i = buffer.length;\n while (i--) {\n if (buffer[i].pts <= endPts) {\n break;\n }\n }\n if (i === -1) {\n // no removal because end of remove range is before start of buffer\n return updatedBuffer;\n }\n let j = i + 1;\n while (j--) {\n if (buffer[j].pts <= startPts) {\n break;\n }\n } // clamp remove range start to 0 index\n\n j = Math.max(j, 0);\n updatedBuffer.splice(j, i - j + 1);\n return updatedBuffer;\n};\nconst shallowEqual = function (a, b) {\n // if both are undefined\n // or one or the other is undefined\n // they are not equal\n if (!a && !b || !a && b || a && !b) {\n return false;\n } // they are the same object and thus, equal\n\n if (a === b) {\n return true;\n } // sort keys so we can make sure they have\n // all the same keys later.\n\n const akeys = Object.keys(a).sort();\n const 
bkeys = Object.keys(b).sort(); // different number of keys, not equal\n\n if (akeys.length !== bkeys.length) {\n return false;\n }\n for (let i = 0; i < akeys.length; i++) {\n const key = akeys[i]; // different sorted keys, not equal\n\n if (key !== bkeys[i]) {\n return false;\n } // different values, not equal\n\n if (a[key] !== b[key]) {\n return false;\n }\n }\n return true;\n};\n\n/**\n * The segment loader has no recourse except to fetch a segment in the\n * current playlist and use the internal timestamps in that segment to\n * generate a syncPoint. This function returns a good candidate index\n * for that process.\n *\n * @param {Array} segments - the segments array from a playlist.\n * @return {number} An index of a segment from the playlist to load\n */\n\nconst getSyncSegmentCandidate = function (currentTimeline, segments, targetTime) {\n segments = segments || [];\n const timelineSegments = [];\n let time = 0;\n for (let i = 0; i < segments.length; i++) {\n const segment = segments[i];\n if (currentTimeline === segment.timeline) {\n timelineSegments.push(i);\n time += segment.duration;\n if (time > targetTime) {\n return i;\n }\n }\n }\n if (timelineSegments.length === 0) {\n return 0;\n } // default to the last timeline segment\n\n return timelineSegments[timelineSegments.length - 1];\n}; // In the event of a quota exceeded error, keep at least one second of back buffer. This\n// number was arbitrarily chosen and may be updated in the future, but seemed reasonable\n// as a start to prevent any potential issues with removing content too close to the\n// playhead.\n\nconst MIN_BACK_BUFFER = 1; // in ms\n\nconst CHECK_BUFFER_DELAY = 500;\nconst finite = num => typeof num === 'number' && isFinite(num); // With most content hovering around 30fps, if a segment has a duration less than a half\n// frame at 30fps or one frame at 60fps, the bandwidth and throughput calculations will\n// not accurately reflect the rest of the content.\n\nconst MIN_SEGMENT_DURATION_TO_SAVE_STATS = 1 / 60;\nconst illegalMediaSwitch = (loaderType, startingMedia, trackInfo) => {\n // Although these checks should most likely cover non 'main' types, for now it narrows\n // the scope of our checks.\n if (loaderType !== 'main' || !startingMedia || !trackInfo) {\n return null;\n }\n if (!trackInfo.hasAudio && !trackInfo.hasVideo) {\n return 'Neither audio nor video found in segment.';\n }\n if (startingMedia.hasVideo && !trackInfo.hasVideo) {\n return 'Only audio found in segment when we expected video.' + ' We can\\'t switch to audio only from a stream that had video.' + ' To get rid of this message, please add codec information to the manifest.';\n }\n if (!startingMedia.hasVideo && trackInfo.hasVideo) {\n return 'Video found in segment when we expected only audio.' + ' We can\\'t switch to a stream with video from an audio only stream.' 
+ ' To get rid of this message, please add codec information to the manifest.';\n }\n return null;\n};\n/**\n * Calculates a time value that is safe to remove from the back buffer without interrupting\n * playback.\n *\n * @param {TimeRange} seekable\n * The current seekable range\n * @param {number} currentTime\n * The current time of the player\n * @param {number} targetDuration\n * The target duration of the current playlist\n * @return {number}\n * Time that is safe to remove from the back buffer without interrupting playback\n */\n\nconst safeBackBufferTrimTime = (seekable, currentTime, targetDuration) => {\n // 30 seconds before the playhead provides a safe default for trimming.\n //\n // Choosing a reasonable default is particularly important for high bitrate content and\n // VOD videos/live streams with large windows, as the buffer may end up overfilled and\n // throw an APPEND_BUFFER_ERR.\n let trimTime = currentTime - Config.BACK_BUFFER_LENGTH;\n if (seekable.length) {\n // Some live playlists may have a shorter window of content than the full allowed back\n // buffer. For these playlists, don't save content that's no longer within the window.\n trimTime = Math.max(trimTime, seekable.start(0));\n } // Don't remove within target duration of the current time to avoid the possibility of\n // removing the GOP currently being played, as removing it can cause playback stalls.\n\n const maxTrimTime = currentTime - targetDuration;\n return Math.min(maxTrimTime, trimTime);\n};\nconst segmentInfoString = segmentInfo => {\n const {\n startOfSegment,\n duration,\n segment,\n part,\n playlist: {\n mediaSequence: seq,\n id,\n segments = []\n },\n mediaIndex: index,\n partIndex,\n timeline\n } = segmentInfo;\n const segmentLen = segments.length - 1;\n let selection = 'mediaIndex/partIndex increment';\n if (segmentInfo.getMediaInfoForTime) {\n selection = `getMediaInfoForTime (${segmentInfo.getMediaInfoForTime})`;\n } else if (segmentInfo.isSyncRequest) {\n selection = 'getSyncSegmentCandidate (isSyncRequest)';\n }\n if (segmentInfo.independent) {\n selection += ` with independent ${segmentInfo.independent}`;\n }\n const hasPartIndex = typeof partIndex === 'number';\n const name = segmentInfo.segment.uri ? 'segment' : 'pre-segment';\n const zeroBasedPartCount = hasPartIndex ? getKnownPartCount({\n preloadSegment: segment\n }) - 1 : 0;\n return `${name} [${seq + index}/${seq + segmentLen}]` + (hasPartIndex ? ` part [${partIndex}/${zeroBasedPartCount}]` : '') + ` segment start/end [${segment.start} => ${segment.end}]` + (hasPartIndex ? 
` part start/end [${part.start} => ${part.end}]` : '') + ` startOfSegment [${startOfSegment}]` + ` duration [${duration}]` + ` timeline [${timeline}]` + ` selected by [${selection}]` + ` playlist [${id}]`;\n};\nconst timingInfoPropertyForMedia = mediaType => `${mediaType}TimingInfo`;\n/**\n * Returns the timestamp offset to use for the segment.\n *\n * @param {number} segmentTimeline\n * The timeline of the segment\n * @param {number} currentTimeline\n * The timeline currently being followed by the loader\n * @param {number} startOfSegment\n * The estimated segment start\n * @param {TimeRange[]} buffered\n * The loader's buffer\n * @param {boolean} overrideCheck\n * If true, no checks are made to see if the timestamp offset value should be set,\n * but sets it directly to a value.\n *\n * @return {number|null}\n * Either a number representing a new timestamp offset, or null if the segment is\n * part of the same timeline\n */\n\nconst timestampOffsetForSegment = ({\n segmentTimeline,\n currentTimeline,\n startOfSegment,\n buffered,\n overrideCheck\n}) => {\n // Check to see if we are crossing a discontinuity to see if we need to set the\n // timestamp offset on the transmuxer and source buffer.\n //\n // Previously, we changed the timestampOffset if the start of this segment was less than\n // the currently set timestampOffset, but this isn't desirable as it can produce bad\n // behavior, especially around long running live streams.\n if (!overrideCheck && segmentTimeline === currentTimeline) {\n return null;\n } // When changing renditions, it's possible to request a segment on an older timeline. For\n // instance, given two renditions with the following:\n //\n // #EXTINF:10\n // segment1\n // #EXT-X-DISCONTINUITY\n // #EXTINF:10\n // segment2\n // #EXTINF:10\n // segment3\n //\n // And the current player state:\n //\n // current time: 8\n // buffer: 0 => 20\n //\n // The next segment on the current rendition would be segment3, filling the buffer from\n // 20s onwards. However, if a rendition switch happens after segment2 was requested,\n // then the next segment to be requested will be segment1 from the new rendition in\n // order to fill time 8 and onwards. Using the buffered end would result in repeated\n // content (since it would position segment1 of the new rendition starting at 20s). This\n // case can be identified when the new segment's timeline is a prior value. Instead of\n // using the buffered end, the startOfSegment can be used, which, hopefully, will be\n // more accurate to the actual start time of the segment.\n\n if (segmentTimeline < currentTimeline) {\n return startOfSegment;\n } // segmentInfo.startOfSegment used to be used as the timestamp offset, however, that\n // value uses the end of the last segment if it is available. While this value\n // should often be correct, it's better to rely on the buffered end, as the new\n // content post discontinuity should line up with the buffered end as if it were\n // time 0 for the new content.\n\n return buffered.length ? buffered.end(buffered.length - 1) : startOfSegment;\n};\n/**\n * Returns whether or not the loader should wait for a timeline change from the timeline\n * change controller before processing the segment.\n *\n * Primary timing in VHS goes by video. This is different from most media players, as\n * audio is more often used as the primary timing source. 
For the foreseeable future, VHS\n * will continue to use video as the primary timing source, due to the current logic and\n * expectations built around it.\n\n * Since the timing follows video, in order to maintain sync, the video loader is\n * responsible for setting both audio and video source buffer timestamp offsets.\n *\n * Setting different values for audio and video source buffers could lead to\n * desyncing. The following examples demonstrate some of the situations where this\n * distinction is important. Note that all of these cases involve demuxed content. When\n * content is muxed, the audio and video are packaged together, therefore syncing\n * separate media playlists is not an issue.\n *\n * CASE 1: Audio prepares to load a new timeline before video:\n *\n * Timeline: 0 1\n * Audio Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Audio Loader: ^\n * Video Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Video Loader ^\n *\n * In the above example, the audio loader is preparing to load the 6th segment, the first\n * after a discontinuity, while the video loader is still loading the 5th segment, before\n * the discontinuity.\n *\n * If the audio loader goes ahead and loads and appends the 6th segment before the video\n * loader crosses the discontinuity, then when appended, the 6th audio segment will use\n * the timestamp offset from timeline 0. This will likely lead to desyncing. In addition,\n * the audio loader must provide the audioAppendStart value to trim the content in the\n * transmuxer, and that value relies on the audio timestamp offset. Since the audio\n * timestamp offset is set by the video (main) loader, the audio loader shouldn't load the\n * segment until that value is provided.\n *\n * CASE 2: Video prepares to load a new timeline before audio:\n *\n * Timeline: 0 1\n * Audio Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Audio Loader: ^\n * Video Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Video Loader ^\n *\n * In the above example, the video loader is preparing to load the 6th segment, the first\n * after a discontinuity, while the audio loader is still loading the 5th segment, before\n * the discontinuity.\n *\n * If the video loader goes ahead and loads and appends the 6th segment, then once the\n * segment is loaded and processed, both the video and audio timestamp offsets will be\n * set, since video is used as the primary timing source. This is to ensure content lines\n * up appropriately, as any modifications to the video timing are reflected by audio when\n * the video loader sets the audio and video timestamp offsets to the same value. However,\n * setting the timestamp offset for audio before audio has had a chance to change\n * timelines will likely lead to desyncing, as the audio loader will append segment 5 with\n * a timestamp intended to apply to segments from timeline 1 rather than timeline 0.\n *\n * CASE 3: When seeking, audio prepares to load a new timeline before video\n *\n * Timeline: 0 1\n * Audio Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Audio Loader: ^\n * Video Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Video Loader ^\n *\n * In the above example, both audio and video loaders are loading segments from timeline\n * 0, but imagine that the seek originated from timeline 1.\n *\n * When seeking to a new timeline, the timestamp offset will be set based on the expected\n * segment start of the loaded video segment. 
In order to maintain sync, the audio loader\n * must wait for the video loader to load its segment and update both the audio and video\n * timestamp offsets before it may load and append its own segment. This is the case\n * whether the seek results in a mismatched segment request (e.g., the audio loader\n * chooses to load segment 3 and the video loader chooses to load segment 4) or the\n * loaders choose to load the same segment index from each playlist, as the segments may\n * not be aligned perfectly, even for matching segment indexes.\n *\n * @param {Object} timelinechangeController\n * @param {number} currentTimeline\n * The timeline currently being followed by the loader\n * @param {number} segmentTimeline\n * The timeline of the segment being loaded\n * @param {('main'|'audio')} loaderType\n * The loader type\n * @param {boolean} audioDisabled\n * Whether the audio is disabled for the loader. This should only be true when the\n * loader may have muxed audio in its segment, but should not append it, e.g., for\n * the main loader when an alternate audio playlist is active.\n *\n * @return {boolean}\n * Whether the loader should wait for a timeline change from the timeline change\n * controller before processing the segment\n */\n\nconst shouldWaitForTimelineChange = ({\n timelineChangeController,\n currentTimeline,\n segmentTimeline,\n loaderType,\n audioDisabled\n}) => {\n if (currentTimeline === segmentTimeline) {\n return false;\n }\n if (loaderType === 'audio') {\n const lastMainTimelineChange = timelineChangeController.lastTimelineChange({\n type: 'main'\n }); // Audio loader should wait if:\n //\n // * main hasn't had a timeline change yet (thus has not loaded its first segment)\n // * main hasn't yet changed to the timeline audio is looking to load\n\n return !lastMainTimelineChange || lastMainTimelineChange.to !== segmentTimeline;\n } // The main loader only needs to wait for timeline changes if there's demuxed audio.\n // Otherwise, there's nothing to wait for, since audio would be muxed into the main\n // loader's segments (or the content is audio/video only and handled by the main\n // loader).\n\n if (loaderType === 'main' && audioDisabled) {\n const pendingAudioTimelineChange = timelineChangeController.pendingTimelineChange({\n type: 'audio'\n }); // Main loader should wait for the audio loader if audio is not pending a timeline\n // change to the current timeline.\n //\n // Since the main loader is responsible for setting the timestamp offset for both\n // audio and video, the main loader must wait for audio to be about to change to its\n // timeline before setting the offset, otherwise, if audio is behind in loading,\n // segments from the previous timeline would be adjusted by the new timestamp offset.\n //\n // This requirement means that video will not cross a timeline until the audio is\n // about to cross to it, so that way audio and video will always cross the timeline\n // together.\n //\n // In addition to normal timeline changes, these rules also apply to the start of a\n // stream (going from a non-existent timeline, -1, to timeline 0). It's important\n // that these rules apply to the first timeline change because if they did not, it's\n // possible that the main loader will cross two timelines before the audio loader has\n // crossed one. 
Logic may be implemented to handle the startup as a special case, but\n // it's easier to simply treat all timeline changes the same.\n\n if (pendingAudioTimelineChange && pendingAudioTimelineChange.to === segmentTimeline) {\n return false;\n }\n return true;\n }\n return false;\n};\nconst shouldFixBadTimelineChanges = timelineChangeController => {\n if (!timelineChangeController) {\n return false;\n }\n const pendingAudioTimelineChange = timelineChangeController.pendingTimelineChange({\n type: 'audio'\n });\n const pendingMainTimelineChange = timelineChangeController.pendingTimelineChange({\n type: 'main'\n });\n const hasPendingTimelineChanges = pendingAudioTimelineChange && pendingMainTimelineChange;\n const differentPendingChanges = hasPendingTimelineChanges && pendingAudioTimelineChange.to !== pendingMainTimelineChange.to;\n const isNotInitialPendingTimelineChange = hasPendingTimelineChanges && pendingAudioTimelineChange.from !== -1 && pendingMainTimelineChange.from !== -1;\n if (isNotInitialPendingTimelineChange && differentPendingChanges) {\n return true;\n }\n return false;\n};\nconst fixBadTimelineChange = segmentLoader => {\n if (!segmentLoader) {\n return;\n }\n segmentLoader.pause();\n segmentLoader.resetEverything();\n segmentLoader.load();\n};\nconst mediaDuration = timingInfos => {\n let maxDuration = 0;\n ['video', 'audio'].forEach(function (type) {\n const typeTimingInfo = timingInfos[`${type}TimingInfo`];\n if (!typeTimingInfo) {\n return;\n }\n const {\n start,\n end\n } = typeTimingInfo;\n let duration;\n if (typeof start === 'bigint' || typeof end === 'bigint') {\n duration = window$1.BigInt(end) - window$1.BigInt(start);\n } else if (typeof start === 'number' && typeof end === 'number') {\n duration = end - start;\n }\n if (typeof duration !== 'undefined' && duration > maxDuration) {\n maxDuration = duration;\n }\n }); // convert back to a number if it is lower than MAX_SAFE_INTEGER\n // as we only need BigInt when we are above that.\n\n if (typeof maxDuration === 'bigint' && maxDuration < Number.MAX_SAFE_INTEGER) {\n maxDuration = Number(maxDuration);\n }\n return maxDuration;\n};\nconst segmentTooLong = ({\n segmentDuration,\n maxDuration\n}) => {\n // 0 duration segments are most likely due to metadata only segments or a lack of\n // information.\n if (!segmentDuration) {\n return false;\n } // For HLS:\n //\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.3.1\n // The EXTINF duration of each Media Segment in the Playlist\n // file, when rounded to the nearest integer, MUST be less than or equal\n // to the target duration; longer segments can trigger playback stalls\n // or other errors.\n //\n // For DASH, the mpd-parser uses the largest reported segment duration as the target\n // duration. 
Although that reported duration is occasionally approximate (i.e., not\n // exact), a strict check may report that a segment is too long more often in DASH.\n\n return Math.round(segmentDuration) > maxDuration + TIME_FUDGE_FACTOR;\n};\nconst getTroublesomeSegmentDurationMessage = (segmentInfo, sourceType) => {\n // Right now we aren't following DASH's timing model exactly, so only perform\n // this check for HLS content.\n if (sourceType !== 'hls') {\n return null;\n }\n const segmentDuration = mediaDuration({\n audioTimingInfo: segmentInfo.audioTimingInfo,\n videoTimingInfo: segmentInfo.videoTimingInfo\n }); // Don't report if we lack information.\n //\n // If the segment has a duration of 0 it is either a lack of information or a\n // metadata only segment and shouldn't be reported here.\n\n if (!segmentDuration) {\n return null;\n }\n const targetDuration = segmentInfo.playlist.targetDuration;\n const isSegmentWayTooLong = segmentTooLong({\n segmentDuration,\n maxDuration: targetDuration * 2\n });\n const isSegmentSlightlyTooLong = segmentTooLong({\n segmentDuration,\n maxDuration: targetDuration\n });\n const segmentTooLongMessage = `Segment with index ${segmentInfo.mediaIndex} ` + `from playlist ${segmentInfo.playlist.id} ` + `has a duration of ${segmentDuration} ` + `when the reported duration is ${segmentInfo.duration} ` + `and the target duration is ${targetDuration}. ` + 'For HLS content, a duration in excess of the target duration may result in ' + 'playback issues. See the HLS specification section on EXT-X-TARGETDURATION for ' + 'more details: ' + 'https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.3.1';\n if (isSegmentWayTooLong || isSegmentSlightlyTooLong) {\n return {\n severity: isSegmentWayTooLong ? 'warn' : 'info',\n message: segmentTooLongMessage\n };\n }\n return null;\n};\n/**\n *\n * @param {Object} options type of segment loader and segment either segmentInfo or simple segment\n * @return a segmentInfo payload for events or errors.\n */\n\nconst segmentInfoPayload = ({\n type,\n segment\n}) => {\n if (!segment) {\n return;\n }\n const isEncrypted = Boolean(segment.key || segment.map && segment.map.ke);\n const isMediaInitialization = Boolean(segment.map && !segment.map.bytes);\n const start = segment.startOfSegment === undefined ? 
segment.start : segment.startOfSegment;\n return {\n type: type || segment.type,\n uri: segment.resolvedUri || segment.uri,\n start,\n duration: segment.duration,\n isEncrypted,\n isMediaInitialization\n };\n};\n/**\n * An object that manages segment loading and appending.\n *\n * @class SegmentLoader\n * @param {Object} options required and optional options\n * @extends videojs.EventTarget\n */\n\nclass SegmentLoader extends videojs.EventTarget {\n constructor(settings, options = {}) {\n super(); // check pre-conditions\n\n if (!settings) {\n throw new TypeError('Initialization settings are required');\n }\n if (typeof settings.currentTime !== 'function') {\n throw new TypeError('No currentTime getter specified');\n }\n if (!settings.mediaSource) {\n throw new TypeError('No MediaSource specified');\n } // public properties\n\n this.bandwidth = settings.bandwidth;\n this.throughput = {\n rate: 0,\n count: 0\n };\n this.roundTrip = NaN;\n this.resetStats_();\n this.mediaIndex = null;\n this.partIndex = null; // private settings\n\n this.hasPlayed_ = settings.hasPlayed;\n this.currentTime_ = settings.currentTime;\n this.seekable_ = settings.seekable;\n this.seeking_ = settings.seeking;\n this.duration_ = settings.duration;\n this.mediaSource_ = settings.mediaSource;\n this.vhs_ = settings.vhs;\n this.loaderType_ = settings.loaderType;\n this.currentMediaInfo_ = void 0;\n this.startingMediaInfo_ = void 0;\n this.segmentMetadataTrack_ = settings.segmentMetadataTrack;\n this.goalBufferLength_ = settings.goalBufferLength;\n this.sourceType_ = settings.sourceType;\n this.sourceUpdater_ = settings.sourceUpdater;\n this.inbandTextTracks_ = settings.inbandTextTracks;\n this.state_ = 'INIT';\n this.timelineChangeController_ = settings.timelineChangeController;\n this.shouldSaveSegmentTimingInfo_ = true;\n this.parse708captions_ = settings.parse708captions;\n this.useDtsForTimestampOffset_ = settings.useDtsForTimestampOffset;\n this.captionServices_ = settings.captionServices;\n this.exactManifestTimings = settings.exactManifestTimings;\n this.addMetadataToTextTrack = settings.addMetadataToTextTrack; // private instance variables\n\n this.checkBufferTimeout_ = null;\n this.error_ = void 0;\n this.currentTimeline_ = -1;\n this.shouldForceTimestampOffsetAfterResync_ = false;\n this.pendingSegment_ = null;\n this.xhrOptions_ = null;\n this.pendingSegments_ = [];\n this.audioDisabled_ = false;\n this.isPendingTimestampOffset_ = false; // TODO possibly move gopBuffer and timeMapping info to a separate controller\n\n this.gopBuffer_ = [];\n this.timeMapping_ = 0;\n this.safeAppend_ = false;\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n this.playlistOfLastInitSegment_ = {\n audio: null,\n video: null\n };\n this.callQueue_ = []; // If the segment loader prepares to load a segment, but does not have enough\n // information yet to start the loading process (e.g., if the audio loader wants to\n // load a segment from the next timeline but the main loader hasn't yet crossed that\n // timeline), then the load call will be added to the queue until it is ready to be\n // processed.\n\n this.loadQueue_ = [];\n this.metadataQueue_ = {\n id3: [],\n caption: []\n };\n this.waitingOnRemove_ = false;\n this.quotaExceededErrorRetryTimeout_ = null; // Fragmented mp4 playback\n\n this.activeInitSegmentId_ = null;\n this.initSegments_ = {}; // HLSe playback\n\n this.cacheEncryptionKeys_ = settings.cacheEncryptionKeys;\n this.keyCache_ = {};\n this.decrypter_ = settings.decrypter; // Manages the tracking 
and generation of sync-points, mappings\n // between a time in the display time and a segment index within\n // a playlist\n\n this.syncController_ = settings.syncController;\n this.syncPoint_ = {\n segmentIndex: 0,\n time: 0\n };\n this.transmuxer_ = this.createTransmuxer_();\n this.triggerSyncInfoUpdate_ = () => this.trigger('syncinfoupdate');\n this.syncController_.on('syncinfoupdate', this.triggerSyncInfoUpdate_);\n this.mediaSource_.addEventListener('sourceopen', () => {\n if (!this.isEndOfStream_()) {\n this.ended_ = false;\n }\n }); // ...for determining the fetch location\n\n this.fetchAtBuffer_ = false;\n this.logger_ = logger(`SegmentLoader[${this.loaderType_}]`);\n Object.defineProperty(this, 'state', {\n get() {\n return this.state_;\n },\n set(newState) {\n if (newState !== this.state_) {\n this.logger_(`${this.state_} -> ${newState}`);\n this.state_ = newState;\n this.trigger('statechange');\n }\n }\n });\n this.sourceUpdater_.on('ready', () => {\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n });\n this.sourceUpdater_.on('codecschange', metadata => {\n this.trigger(_extends({\n type: 'codecschange'\n }, metadata));\n }); // Only the main loader needs to listen for pending timeline changes, as the main\n // loader should wait for audio to be ready to change its timeline so that both main\n // and audio timelines change together. For more details, see the\n // shouldWaitForTimelineChange function.\n\n if (this.loaderType_ === 'main') {\n this.timelineChangeController_.on('pendingtimelinechange', () => {\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n });\n } // The main loader only listens on pending timeline changes, but the audio loader,\n // since its loads follow main, needs to listen on timeline changes. 
For more details,\n // see the shouldWaitForTimelineChange function.\n\n if (this.loaderType_ === 'audio') {\n this.timelineChangeController_.on('timelinechange', metadata => {\n this.trigger(_extends({\n type: 'timelinechange'\n }, metadata));\n if (this.hasEnoughInfoToLoad_()) {\n this.processLoadQueue_();\n }\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n });\n }\n }\n /**\n * TODO: Current sync controller consists of many hls-specific strategies\n * media sequence sync is also hls-specific, and we would like to be protocol-agnostic on this level\n * this should be a part of the sync-controller and sync controller should expect different strategy list based on the protocol.\n *\n * @return {MediaSequenceSync|null}\n * @private\n */\n\n get mediaSequenceSync_() {\n return this.syncController_.getMediaSequenceSync(this.loaderType_);\n }\n createTransmuxer_() {\n return segmentTransmuxer.createTransmuxer({\n remux: false,\n alignGopsAtEnd: this.safeAppend_,\n keepOriginalTimestamps: true,\n parse708captions: this.parse708captions_,\n captionServices: this.captionServices_\n });\n }\n /**\n * reset all of our media stats\n *\n * @private\n */\n\n resetStats_() {\n this.mediaBytesTransferred = 0;\n this.mediaRequests = 0;\n this.mediaRequestsAborted = 0;\n this.mediaRequestsTimedout = 0;\n this.mediaRequestsErrored = 0;\n this.mediaTransferDuration = 0;\n this.mediaSecondsLoaded = 0;\n this.mediaAppends = 0;\n }\n /**\n * dispose of the SegmentLoader and reset to the default state\n */\n\n dispose() {\n this.trigger('dispose');\n this.state = 'DISPOSED';\n this.pause();\n this.abort_();\n if (this.transmuxer_) {\n this.transmuxer_.terminate();\n }\n this.resetStats_();\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n }\n if (this.syncController_ && this.triggerSyncInfoUpdate_) {\n this.syncController_.off('syncinfoupdate', this.triggerSyncInfoUpdate_);\n }\n this.off();\n }\n setAudio(enable) {\n this.audioDisabled_ = !enable;\n if (enable) {\n this.appendInitSegment_.audio = true;\n } else {\n // remove current track audio if it gets disabled\n this.sourceUpdater_.removeAudio(0, this.duration_());\n }\n }\n /**\n * abort anything that is currently doing on with the SegmentLoader\n * and reset to a default state\n */\n\n abort() {\n if (this.state !== 'WAITING') {\n if (this.pendingSegment_) {\n this.pendingSegment_ = null;\n }\n return;\n }\n this.abort_(); // We aborted the requests we were waiting on, so reset the loader's state to READY\n // since we are no longer \"waiting\" on any requests. XHR callback is not always run\n // when the request is aborted. 
This will prevent the loader from being stuck in the\n // WAITING state indefinitely.\n\n this.state = 'READY'; // don't wait for buffer check timeouts to begin fetching the\n // next segment\n\n if (!this.paused()) {\n this.monitorBuffer_();\n }\n }\n /**\n * abort all pending xhr requests and null any pending segements\n *\n * @private\n */\n\n abort_() {\n if (this.pendingSegment_ && this.pendingSegment_.abortRequests) {\n this.pendingSegment_.abortRequests();\n } // clear out the segment being processed\n\n this.pendingSegment_ = null;\n this.callQueue_ = [];\n this.loadQueue_ = [];\n this.metadataQueue_.id3 = [];\n this.metadataQueue_.caption = [];\n this.timelineChangeController_.clearPendingTimelineChange(this.loaderType_);\n this.waitingOnRemove_ = false;\n window$1.clearTimeout(this.quotaExceededErrorRetryTimeout_);\n this.quotaExceededErrorRetryTimeout_ = null;\n }\n checkForAbort_(requestId) {\n // If the state is APPENDING, then aborts will not modify the state, meaning the first\n // callback that happens should reset the state to READY so that loading can continue.\n if (this.state === 'APPENDING' && !this.pendingSegment_) {\n this.state = 'READY';\n return true;\n }\n if (!this.pendingSegment_ || this.pendingSegment_.requestId !== requestId) {\n return true;\n }\n return false;\n }\n /**\n * set an error on the segment loader and null out any pending segements\n *\n * @param {Error} error the error to set on the SegmentLoader\n * @return {Error} the error that was set or that is currently set\n */\n\n error(error) {\n if (typeof error !== 'undefined') {\n this.logger_('error occurred:', error);\n this.error_ = error;\n }\n this.pendingSegment_ = null;\n return this.error_;\n }\n endOfStream() {\n this.ended_ = true;\n if (this.transmuxer_) {\n // need to clear out any cached data to prepare for the new segment\n segmentTransmuxer.reset(this.transmuxer_);\n }\n this.gopBuffer_.length = 0;\n this.pause();\n this.trigger('ended');\n }\n /**\n * Indicates which time ranges are buffered\n *\n * @return {TimeRange}\n * TimeRange object representing the current buffered ranges\n */\n\n buffered_() {\n const trackInfo = this.getMediaInfo_();\n if (!this.sourceUpdater_ || !trackInfo) {\n return createTimeRanges();\n }\n if (this.loaderType_ === 'main') {\n const {\n hasAudio,\n hasVideo,\n isMuxed\n } = trackInfo;\n if (hasVideo && hasAudio && !this.audioDisabled_ && !isMuxed) {\n return this.sourceUpdater_.buffered();\n }\n if (hasVideo) {\n return this.sourceUpdater_.videoBuffered();\n }\n } // One case that can be ignored for now is audio only with alt audio,\n // as we don't yet have proper support for that.\n\n return this.sourceUpdater_.audioBuffered();\n }\n /**\n * Gets and sets init segment for the provided map\n *\n * @param {Object} map\n * The map object representing the init segment to get or set\n * @param {boolean=} set\n * If true, the init segment for the provided map should be saved\n * @return {Object}\n * map object for desired init segment\n */\n\n initSegmentForMap(map, set = false) {\n if (!map) {\n return null;\n }\n const id = initSegmentId(map);\n let storedMap = this.initSegments_[id];\n if (set && !storedMap && map.bytes) {\n this.initSegments_[id] = storedMap = {\n resolvedUri: map.resolvedUri,\n byterange: map.byterange,\n bytes: map.bytes,\n tracks: map.tracks,\n timescales: map.timescales\n };\n }\n return storedMap || map;\n }\n /**\n * Gets and sets key for the provided key\n *\n * @param {Object} key\n * The key object representing the key to get or 
set\n * @param {boolean=} set\n * If true, the key for the provided key should be saved\n * @return {Object}\n * Key object for desired key\n */\n\n segmentKey(key, set = false) {\n if (!key) {\n return null;\n }\n const id = segmentKeyId(key);\n let storedKey = this.keyCache_[id]; // TODO: We should use the HTTP Expires header to invalidate our cache per\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-6.2.3\n\n if (this.cacheEncryptionKeys_ && set && !storedKey && key.bytes) {\n this.keyCache_[id] = storedKey = {\n resolvedUri: key.resolvedUri,\n bytes: key.bytes\n };\n }\n const result = {\n resolvedUri: (storedKey || key).resolvedUri\n };\n if (storedKey) {\n result.bytes = storedKey.bytes;\n }\n return result;\n }\n /**\n * Returns true if all configuration required for loading is present, otherwise false.\n *\n * @return {boolean} True if the all configuration is ready for loading\n * @private\n */\n\n couldBeginLoading_() {\n return this.playlist_ && !this.paused();\n }\n /**\n * load a playlist and start to fill the buffer\n */\n\n load() {\n // un-pause\n this.monitorBuffer_(); // if we don't have a playlist yet, keep waiting for one to be\n // specified\n\n if (!this.playlist_) {\n return;\n } // if all the configuration is ready, initialize and begin loading\n\n if (this.state === 'INIT' && this.couldBeginLoading_()) {\n return this.init_();\n } // if we're in the middle of processing a segment already, don't\n // kick off an additional segment request\n\n if (!this.couldBeginLoading_() || this.state !== 'READY' && this.state !== 'INIT') {\n return;\n }\n this.state = 'READY';\n }\n /**\n * Once all the starting parameters have been specified, begin\n * operation. This method should only be invoked from the INIT\n * state.\n *\n * @private\n */\n\n init_() {\n this.state = 'READY'; // if this is the audio segment loader, and it hasn't been inited before, then any old\n // audio data from the muxed content should be removed\n\n this.resetEverything();\n return this.monitorBuffer_();\n }\n /**\n * set a playlist on the segment loader\n *\n * @param {PlaylistLoader} media the playlist to set on the segment loader\n */\n\n playlist(newPlaylist, options = {}) {\n if (!newPlaylist) {\n return;\n }\n const oldPlaylist = this.playlist_;\n const segmentInfo = this.pendingSegment_;\n this.playlist_ = newPlaylist;\n this.xhrOptions_ = options; // when we haven't started playing yet, the start of a live playlist\n // is always our zero-time so force a sync update each time the playlist\n // is refreshed from the server\n //\n // Use the INIT state to determine if playback has started, as the playlist sync info\n // should be fixed once requests begin (as sync points are generated based on sync\n // info), but not before then.\n\n if (this.state === 'INIT') {\n newPlaylist.syncInfo = {\n mediaSequence: newPlaylist.mediaSequence,\n time: 0\n }; // Setting the date time mapping means mapping the program date time (if available)\n // to time 0 on the player's timeline. The playlist's syncInfo serves a similar\n // purpose, mapping the initial mediaSequence to time zero. 
Since the syncInfo can\n // be updated as the playlist is refreshed before the loader starts loading, the\n // program date time mapping needs to be updated as well.\n //\n // This mapping is only done for the main loader because a program date time should\n // map equivalently between playlists.\n\n if (this.loaderType_ === 'main') {\n this.syncController_.setDateTimeMappingForStart(newPlaylist);\n }\n }\n let oldId = null;\n if (oldPlaylist) {\n if (oldPlaylist.id) {\n oldId = oldPlaylist.id;\n } else if (oldPlaylist.uri) {\n oldId = oldPlaylist.uri;\n }\n }\n this.logger_(`playlist update [${oldId} => ${newPlaylist.id || newPlaylist.uri}]`);\n if (this.mediaSequenceSync_) {\n this.mediaSequenceSync_.update(newPlaylist, this.currentTime_());\n this.logger_(`Playlist update:\ncurrentTime: ${this.currentTime_()}\nbufferedEnd: ${lastBufferedEnd(this.buffered_())}\n`, this.mediaSequenceSync_.diagnostics);\n } // in VOD, this is always a rendition switch (or we updated our syncInfo above)\n // in LIVE, we always want to update with new playlists (including refreshes)\n\n this.trigger('syncinfoupdate'); // if we were unpaused but waiting for a playlist, start\n // buffering now\n\n if (this.state === 'INIT' && this.couldBeginLoading_()) {\n return this.init_();\n }\n if (!oldPlaylist || oldPlaylist.uri !== newPlaylist.uri) {\n if (this.mediaIndex !== null) {\n // we must reset/resync the segment loader when we switch renditions and\n // the segment loader is already synced to the previous rendition\n // We only want to reset the loader here for LLHLS playback, as resetLoader sets fetchAtBuffer_\n // to false, resulting in fetching segments at currentTime and causing repeated\n // same-segment requests on playlist change. This erroneously drives up the playback watcher\n // stalled segment count, as re-requesting segments at the currentTime or browser cached segments\n // will not change the buffer.\n // Reference for LLHLS fixes: https://github.com/videojs/http-streaming/pull/1201\n const isLLHLS = !newPlaylist.endList && typeof newPlaylist.partTargetDuration === 'number';\n if (isLLHLS) {\n this.resetLoader();\n } else {\n this.resyncLoader();\n }\n }\n this.currentMediaInfo_ = void 0;\n this.trigger('playlistupdate'); // the rest of this function depends on `oldPlaylist` being defined\n\n return;\n } // we reloaded the same playlist so we are in a live scenario\n // and we will likely need to adjust the mediaIndex\n\n const mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence;\n this.logger_(`live window shift [${mediaSequenceDiff}]`); // update the mediaIndex on the SegmentLoader\n // this is important because we can abort a request and this value must be\n // equal to the last appended mediaIndex\n\n if (this.mediaIndex !== null) {\n this.mediaIndex -= mediaSequenceDiff; // this can happen if we are going to load the first segment, but get a playlist\n // update during that. 
mediaIndex would go from 0 to -1 if mediaSequence in the\n // new playlist was incremented by 1.\n\n if (this.mediaIndex < 0) {\n this.mediaIndex = null;\n this.partIndex = null;\n } else {\n const segment = this.playlist_.segments[this.mediaIndex]; // partIndex should remain the same for the same segment\n // unless parts fell off of the playlist for this segment.\n // In that case we need to reset partIndex and resync\n\n if (this.partIndex && (!segment.parts || !segment.parts.length || !segment.parts[this.partIndex])) {\n const mediaIndex = this.mediaIndex;\n this.logger_(`currently processing part (index ${this.partIndex}) no longer exists.`);\n this.resetLoader(); // We want to throw away the partIndex and the data associated with it,\n // as the part was dropped from our current playlists segment.\n // The mediaIndex will still be valid so keep that around.\n\n this.mediaIndex = mediaIndex;\n }\n }\n } // update the mediaIndex on the SegmentInfo object\n // this is important because we will update this.mediaIndex with this value\n // in `handleAppendsDone_` after the segment has been successfully appended\n\n if (segmentInfo) {\n segmentInfo.mediaIndex -= mediaSequenceDiff;\n if (segmentInfo.mediaIndex < 0) {\n segmentInfo.mediaIndex = null;\n segmentInfo.partIndex = null;\n } else {\n // we need to update the referenced segment so that timing information is\n // saved for the new playlist's segment, however, if the segment fell off the\n // playlist, we can leave the old reference and just lose the timing info\n if (segmentInfo.mediaIndex >= 0) {\n segmentInfo.segment = newPlaylist.segments[segmentInfo.mediaIndex];\n }\n if (segmentInfo.partIndex >= 0 && segmentInfo.segment.parts) {\n segmentInfo.part = segmentInfo.segment.parts[segmentInfo.partIndex];\n }\n }\n }\n this.syncController_.saveExpiredSegmentInfo(oldPlaylist, newPlaylist);\n }\n /**\n * Prevent the loader from fetching additional segments. If there\n * is a segment request outstanding, it will finish processing\n * before the loader halts. A segment loader can be unpaused by\n * calling load().\n */\n\n pause() {\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n this.checkBufferTimeout_ = null;\n }\n }\n /**\n * Returns whether the segment loader is fetching additional\n * segments when given the opportunity. 
This property can be\n * modified through calls to pause() and load().\n */\n\n paused() {\n return this.checkBufferTimeout_ === null;\n }\n /**\n * Delete all the buffered data and reset the SegmentLoader\n *\n * @param {Function} [done] an optional callback to be executed when the remove\n * operation is complete\n */\n\n resetEverything(done) {\n this.ended_ = false;\n this.activeInitSegmentId_ = null;\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n this.resetLoader(); // remove from 0, the earliest point, to Infinity, to signify removal of everything.\n // VTT Segment Loader doesn't need to do anything but in the regular SegmentLoader,\n // we then clamp the value to duration if necessary.\n\n this.remove(0, Infinity, done); // clears fmp4 captions\n\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearAllMp4Captions'\n }); // reset the cache in the transmuxer\n\n this.transmuxer_.postMessage({\n action: 'reset'\n });\n }\n }\n /**\n * Force the SegmentLoader to resync and start loading around the currentTime instead\n * of starting at the end of the buffer\n *\n * Useful for fast quality changes\n */\n\n resetLoader() {\n this.fetchAtBuffer_ = false;\n if (this.mediaSequenceSync_) {\n this.mediaSequenceSync_.resetAppendedStatus();\n }\n this.resyncLoader();\n }\n /**\n * Force the SegmentLoader to restart synchronization and make a conservative guess\n * before returning to the simple walk-forward method\n */\n\n resyncLoader() {\n if (this.transmuxer_) {\n // need to clear out any cached data to prepare for the new segment\n segmentTransmuxer.reset(this.transmuxer_);\n }\n this.mediaIndex = null;\n this.partIndex = null;\n this.syncPoint_ = null;\n this.isPendingTimestampOffset_ = false; // this is mainly to sync timing-info when switching between renditions with and without timestamp-rollover,\n // so we don't want it for DASH or fragmented mp4 segments.\n\n const isFmp4 = this.currentMediaInfo_ && this.currentMediaInfo_.isFmp4;\n const isHlsTs = this.sourceType_ === 'hls' && !isFmp4;\n if (isHlsTs) {\n this.shouldForceTimestampOffsetAfterResync_ = true;\n }\n this.callQueue_ = [];\n this.loadQueue_ = [];\n this.metadataQueue_.id3 = [];\n this.metadataQueue_.caption = [];\n this.abort();\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearParsedMp4Captions'\n });\n }\n }\n /**\n * Remove any data in the source buffer between start and end times\n *\n * @param {number} start - the start time of the region to remove from the buffer\n * @param {number} end - the end time of the region to remove from the buffer\n * @param {Function} [done] - an optional callback to be executed when the remove\n * @param {boolean} force - force all remove operations to happen\n * operation is complete\n */\n\n remove(start, end, done = () => {}, force = false) {\n // clamp end to duration if we need to remove everything.\n // This is due to a browser bug that causes issues if we remove to Infinity.\n // videojs/videojs-contrib-hls#1225\n if (end === Infinity) {\n end = this.duration_();\n } // skip removes that would throw an error\n // commonly happens during a rendition switch at the start of a video\n // from start 0 to end 0\n\n if (end <= start) {\n this.logger_('skipping remove because end ${end} is <= start ${start}');\n return;\n }\n if (!this.sourceUpdater_ || !this.getMediaInfo_()) {\n this.logger_('skipping remove because no source updater or starting media info'); // nothing to remove if we haven't processed any media\n\n 
return;\n } // set it to one to complete this function's removes\n\n let removesRemaining = 1;\n const removeFinished = () => {\n removesRemaining--;\n if (removesRemaining === 0) {\n done();\n }\n };\n if (force || !this.audioDisabled_) {\n removesRemaining++;\n this.sourceUpdater_.removeAudio(start, end, removeFinished);\n } // While it would be better to only remove video if the main loader has video, this\n // should be safe with audio only as removeVideo will call back even if there's no\n // video buffer.\n //\n // In theory we can check to see if there's video before calling the remove, but in\n // the event that we're switching between renditions and from video to audio only\n // (when we add support for that), we may need to clear the video contents despite\n // what the new media will contain.\n\n if (force || this.loaderType_ === 'main') {\n this.gopBuffer_ = removeGopBuffer(this.gopBuffer_, start, end, this.timeMapping_);\n removesRemaining++;\n this.sourceUpdater_.removeVideo(start, end, removeFinished);\n } // remove any captions and ID3 tags\n\n for (const track in this.inbandTextTracks_) {\n removeCuesFromTrack(start, end, this.inbandTextTracks_[track]);\n }\n removeCuesFromTrack(start, end, this.segmentMetadataTrack_); // finished this function's removes\n\n removeFinished();\n }\n /**\n * (re-)schedule monitorBufferTick_ to run as soon as possible\n *\n * @private\n */\n\n monitorBuffer_() {\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n }\n this.checkBufferTimeout_ = window$1.setTimeout(this.monitorBufferTick_.bind(this), 1);\n }\n /**\n * As long as the SegmentLoader is in the READY state, periodically\n * invoke fillBuffer_().\n *\n * @private\n */\n\n monitorBufferTick_() {\n if (this.state === 'READY') {\n this.fillBuffer_();\n }\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n }\n this.checkBufferTimeout_ = window$1.setTimeout(this.monitorBufferTick_.bind(this), CHECK_BUFFER_DELAY);\n }\n /**\n * fill the buffer with segements unless the sourceBuffers are\n * currently updating\n *\n * Note: this function should only ever be called by monitorBuffer_\n * and never directly\n *\n * @private\n */\n\n fillBuffer_() {\n // TODO since the source buffer maintains a queue, and we shouldn't call this function\n // except when we're ready for the next segment, this check can most likely be removed\n if (this.sourceUpdater_.updating()) {\n return;\n } // see if we need to begin loading immediately\n\n const segmentInfo = this.chooseNextRequest_();\n if (!segmentInfo) {\n return;\n }\n const metadata = {\n segmentInfo: segmentInfoPayload({\n type: this.loaderType_,\n segment: segmentInfo\n })\n };\n this.trigger({\n type: 'segmentselected',\n metadata\n });\n if (typeof segmentInfo.timestampOffset === 'number') {\n this.isPendingTimestampOffset_ = false;\n this.timelineChangeController_.pendingTimelineChange({\n type: this.loaderType_,\n from: this.currentTimeline_,\n to: segmentInfo.timeline\n });\n }\n this.loadSegment_(segmentInfo);\n }\n /**\n * Determines if we should call endOfStream on the media source based\n * on the state of the buffer or if appened segment was the final\n * segment in the playlist.\n *\n * @param {number} [mediaIndex] the media index of segment we last appended\n * @param {Object} [playlist] a media playlist object\n * @return {boolean} do we need to call endOfStream on the MediaSource\n */\n\n isEndOfStream_(mediaIndex = this.mediaIndex, playlist = this.playlist_, 
partIndex = this.partIndex) {\n if (!playlist || !this.mediaSource_) {\n return false;\n }\n const segment = typeof mediaIndex === 'number' && playlist.segments[mediaIndex]; // mediaIndex is zero based but length is 1 based\n\n const appendedLastSegment = mediaIndex + 1 === playlist.segments.length; // true if there are no parts, or this is the last part.\n\n const appendedLastPart = !segment || !segment.parts || partIndex + 1 === segment.parts.length; // if we've buffered to the end of the video, we need to call endOfStream\n // so that MediaSources can trigger the `ended` event when it runs out of\n // buffered data instead of waiting for more data\n\n return playlist.endList && this.mediaSource_.readyState === 'open' && appendedLastSegment && appendedLastPart;\n }\n /**\n * Determines what request should be made given current segment loader state.\n *\n * @return {Object} a request object that describes the segment/part to load\n */\n\n chooseNextRequest_() {\n const buffered = this.buffered_();\n const bufferedEnd = lastBufferedEnd(buffered) || 0;\n const bufferedTime = timeAheadOf(buffered, this.currentTime_());\n const preloaded = !this.hasPlayed_() && bufferedTime >= 1;\n const haveEnoughBuffer = bufferedTime >= this.goalBufferLength_();\n const segments = this.playlist_.segments; // return no segment if:\n // 1. we don't have segments\n // 2. The video has not yet played and we already downloaded a segment\n // 3. we already have enough buffered time\n\n if (!segments.length || preloaded || haveEnoughBuffer) {\n return null;\n }\n this.syncPoint_ = this.syncPoint_ || this.syncController_.getSyncPoint(this.playlist_, this.duration_(), this.currentTimeline_, this.currentTime_(), this.loaderType_);\n const next = {\n partIndex: null,\n mediaIndex: null,\n startOfSegment: null,\n playlist: this.playlist_,\n isSyncRequest: Boolean(!this.syncPoint_)\n };\n if (next.isSyncRequest) {\n next.mediaIndex = getSyncSegmentCandidate(this.currentTimeline_, segments, bufferedEnd);\n this.logger_(`choose next request. Cannot find sync point. Fallback to media Index: ${next.mediaIndex}`);\n } else if (this.mediaIndex !== null) {\n const segment = segments[this.mediaIndex];\n const partIndex = typeof this.partIndex === 'number' ? this.partIndex : -1;\n next.startOfSegment = segment.end ? segment.end : bufferedEnd;\n if (segment.parts && segment.parts[partIndex + 1]) {\n next.mediaIndex = this.mediaIndex;\n next.partIndex = partIndex + 1;\n } else {\n next.mediaIndex = this.mediaIndex + 1;\n }\n } else {\n let segmentIndex;\n let partIndex;\n let startTime;\n const targetTime = this.fetchAtBuffer_ ? 
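// [editorial note, not upstream code] resetLoader() clears fetchAtBuffer_, so after a\n // quality switch the loader anchors its search at the playhead rather than the end of\n // the old rendition's buffer; e.g. with 40s buffered but currentTime at 30s, the new\n // rendition is fetched around 30s.\n 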
bufferedEnd : this.currentTime_();\n if (this.mediaSequenceSync_) {\n this.logger_(`chooseNextRequest_ request after Quality Switch:\nFor TargetTime: ${targetTime}.\nCurrentTime: ${this.currentTime_()}\nBufferedEnd: ${bufferedEnd}\nFetch At Buffer: ${this.fetchAtBuffer_}\n`, this.mediaSequenceSync_.diagnostics);\n }\n if (this.mediaSequenceSync_ && this.mediaSequenceSync_.isReliable) {\n const syncInfo = this.getSyncInfoFromMediaSequenceSync_(targetTime);\n if (!syncInfo) {\n const message = 'No sync info found while using media sequence sync';\n this.error({\n message,\n metadata: {\n errorType: videojs.Error.StreamingFailedToSelectNextSegment,\n error: new Error(message)\n }\n });\n this.logger_('chooseNextRequest_ - no sync info found using media sequence sync'); // no match\n\n return null;\n }\n this.logger_(`chooseNextRequest_ mediaSequence syncInfo (${syncInfo.start} --> ${syncInfo.end})`);\n segmentIndex = syncInfo.segmentIndex;\n partIndex = syncInfo.partIndex;\n startTime = syncInfo.start;\n } else {\n this.logger_('chooseNextRequest_ - fallback to a regular segment selection algorithm, based on a syncPoint.'); // fallback\n\n const mediaInfoForTime = Playlist.getMediaInfoForTime({\n exactManifestTimings: this.exactManifestTimings,\n playlist: this.playlist_,\n currentTime: targetTime,\n startingPartIndex: this.syncPoint_.partIndex,\n startingSegmentIndex: this.syncPoint_.segmentIndex,\n startTime: this.syncPoint_.time\n });\n segmentIndex = mediaInfoForTime.segmentIndex;\n partIndex = mediaInfoForTime.partIndex;\n startTime = mediaInfoForTime.startTime;\n }\n next.getMediaInfoForTime = this.fetchAtBuffer_ ? `bufferedEnd ${targetTime}` : `currentTime ${targetTime}`;\n next.mediaIndex = segmentIndex;\n next.startOfSegment = startTime;\n next.partIndex = partIndex;\n this.logger_(`choose next request. Playlist switched and we have a sync point. Media Index: ${next.mediaIndex} `);\n }\n const nextSegment = segments[next.mediaIndex];\n let nextPart = nextSegment && typeof next.partIndex === 'number' && nextSegment.parts && nextSegment.parts[next.partIndex]; // if the next segment index is invalid or\n // the next partIndex is invalid do not choose a next segment.\n\n if (!nextSegment || typeof next.partIndex === 'number' && !nextPart) {\n return null;\n } // if the next segment has parts, and we don't have a partIndex.\n // Set partIndex to 0\n\n if (typeof next.partIndex !== 'number' && nextSegment.parts) {\n next.partIndex = 0;\n nextPart = nextSegment.parts[0];\n } // independentSegments applies to every segment in a playlist. 
If independentSegments appears in a main playlist,\n // it applies to each segment in each media playlist.\n // https://datatracker.ietf.org/doc/html/draft-pantos-http-live-streaming-23#section-4.3.5.1\n\n const hasIndependentSegments = this.vhs_.playlists && this.vhs_.playlists.main && this.vhs_.playlists.main.independentSegments || this.playlist_.independentSegments; // if we have no buffered data then we need to make sure\n // that the next part we append is \"independent\" if possible.\n // So we check if the previous part is independent, and request\n // it if it is.\n\n if (!bufferedTime && nextPart && !hasIndependentSegments && !nextPart.independent) {\n if (next.partIndex === 0) {\n const lastSegment = segments[next.mediaIndex - 1];\n const lastSegmentLastPart = lastSegment.parts && lastSegment.parts.length && lastSegment.parts[lastSegment.parts.length - 1];\n if (lastSegmentLastPart && lastSegmentLastPart.independent) {\n next.mediaIndex -= 1;\n next.partIndex = lastSegment.parts.length - 1;\n next.independent = 'previous segment';\n }\n } else if (nextSegment.parts[next.partIndex - 1].independent) {\n next.partIndex -= 1;\n next.independent = 'previous part';\n }\n }\n const ended = this.mediaSource_ && this.mediaSource_.readyState === 'ended'; // do not choose a next segment if all of the following:\n // 1. this is the last segment in the playlist\n // 2. end of stream has been called on the media source already\n // 3. the player is not seeking\n\n if (next.mediaIndex >= segments.length - 1 && ended && !this.seeking_()) {\n return null;\n }\n if (this.shouldForceTimestampOffsetAfterResync_) {\n this.shouldForceTimestampOffsetAfterResync_ = false;\n next.forceTimestampOffset = true;\n this.logger_('choose next request. Force timestamp offset after loader resync');\n }\n return this.generateSegmentInfo_(next);\n }\n getSyncInfoFromMediaSequenceSync_(targetTime) {\n if (!this.mediaSequenceSync_) {\n return null;\n } // we should pull the target time to the least available time if we drop out of sync for any reason\n\n const finalTargetTime = Math.max(targetTime, this.mediaSequenceSync_.start);\n if (targetTime !== finalTargetTime) {\n this.logger_(`getSyncInfoFromMediaSequenceSync_. 
Pulled target time from ${targetTime} to ${finalTargetTime}`);\n }\n const mediaSequenceSyncInfo = this.mediaSequenceSync_.getSyncInfoForTime(finalTargetTime);\n if (!mediaSequenceSyncInfo) {\n // no match at all\n return null;\n }\n if (!mediaSequenceSyncInfo.isAppended) {\n // has a perfect match\n return mediaSequenceSyncInfo;\n } // has match, but segment was already appended.\n // attempt to auto-advance to the nearest next segment:\n\n const nextMediaSequenceSyncInfo = this.mediaSequenceSync_.getSyncInfoForTime(mediaSequenceSyncInfo.end);\n if (!nextMediaSequenceSyncInfo) {\n // no match at all\n return null;\n }\n if (nextMediaSequenceSyncInfo.isAppended) {\n this.logger_('getSyncInfoFromMediaSequenceSync_: We encountered an unexpected scenario where the next media sequence sync info is also appended!');\n } // got match with the nearest next segment\n\n return nextMediaSequenceSyncInfo;\n }\n generateSegmentInfo_(options) {\n const {\n independent,\n playlist,\n mediaIndex,\n startOfSegment,\n isSyncRequest,\n partIndex,\n forceTimestampOffset,\n getMediaInfoForTime\n } = options;\n const segment = playlist.segments[mediaIndex];\n const part = typeof partIndex === 'number' && segment.parts[partIndex];\n const segmentInfo = {\n requestId: 'segment-loader-' + Math.random(),\n // resolve the segment URL relative to the playlist\n uri: part && part.resolvedUri || segment.resolvedUri,\n // the segment's mediaIndex at the time it was requested\n mediaIndex,\n partIndex: part ? partIndex : null,\n // whether or not to update the SegmentLoader's state with this\n // segment's mediaIndex\n isSyncRequest,\n startOfSegment,\n // the segment's playlist\n playlist,\n // unencrypted bytes of the segment\n bytes: null,\n // when a key is defined for this segment, the encrypted bytes\n encryptedBytes: null,\n // The target timestampOffset for this segment when we append it\n // to the source buffer\n timestampOffset: null,\n // The timeline that the segment is in\n timeline: segment.timeline,\n // The expected duration of the segment in seconds\n duration: part && part.duration || segment.duration,\n // retain the segment in case the playlist updates while doing an async process\n segment,\n part,\n byteLength: 0,\n transmuxer: this.transmuxer_,\n // type of getMediaInfoForTime that was used to get this segment\n getMediaInfoForTime,\n independent\n };\n const overrideCheck = typeof forceTimestampOffset !== 'undefined' ? 
forceTimestampOffset : this.isPendingTimestampOffset_;\n segmentInfo.timestampOffset = this.timestampOffsetForSegment_({\n segmentTimeline: segment.timeline,\n currentTimeline: this.currentTimeline_,\n startOfSegment,\n buffered: this.buffered_(),\n overrideCheck\n });\n const audioBufferedEnd = lastBufferedEnd(this.sourceUpdater_.audioBuffered());\n if (typeof audioBufferedEnd === 'number') {\n // since the transmuxer is using the actual timing values, but the buffer is\n // adjusted by the timestamp offset, we must adjust the value here\n segmentInfo.audioAppendStart = audioBufferedEnd - this.sourceUpdater_.audioTimestampOffset();\n }\n if (this.sourceUpdater_.videoBuffered().length) {\n segmentInfo.gopsToAlignWith = gopsSafeToAlignWith(this.gopBuffer_,\n // since the transmuxer is using the actual timing values, but the time is\n // adjusted by the timestamp offset, we must adjust the value here\n this.currentTime_() - this.sourceUpdater_.videoTimestampOffset(), this.timeMapping_);\n }\n return segmentInfo;\n } // get the timestampoffset for a segment,\n // added so that vtt segment loader can override and prevent\n // adding timestamp offsets.\n\n timestampOffsetForSegment_(options) {\n return timestampOffsetForSegment(options);\n }\n /**\n * Determines if the network has enough bandwidth to complete the current segment\n * request in a timely manner. If not, the request will be aborted early and bandwidth\n * updated to trigger a playlist switch.\n *\n * @param {Object} stats\n * Object containing stats about the request timing and size\n * @private\n */\n\n earlyAbortWhenNeeded_(stats) {\n if (this.vhs_.tech_.paused() ||\n // Don't abort if the current playlist is on the lowestEnabledRendition\n // TODO: Replace using timeout with a boolean indicating whether this playlist is\n // the lowestEnabledRendition.\n !this.xhrOptions_.timeout ||\n // Don't abort if we have no bandwidth information to estimate segment sizes\n !this.playlist_.attributes.BANDWIDTH) {\n return;\n } // Wait at least 1 second since the first byte of data has been received before\n // using the calculated bandwidth from the progress event to allow the bitrate\n // to stabilize\n\n if (Date.now() - (stats.firstBytesReceivedAt || Date.now()) < 1000) {\n return;\n }\n const currentTime = this.currentTime_();\n const measuredBandwidth = stats.bandwidth;\n const segmentDuration = this.pendingSegment_.duration;\n const requestTimeRemaining = Playlist.estimateSegmentRequestTime(segmentDuration, measuredBandwidth, this.playlist_, stats.bytesReceived); // Subtract 1 from the timeUntilRebuffer so we still consider an early abort\n // if we are only left with less than 1 second when the request completes.\n // A negative timeUntilRebuffering indicates we are already rebuffering\n\n const timeUntilRebuffer$1 = timeUntilRebuffer(this.buffered_(), currentTime, this.vhs_.tech_.playbackRate()) - 1; // Only consider aborting early if the estimated time to finish the download\n // is larger than the estimated time until the player runs out of forward buffer\n\n if (requestTimeRemaining <= timeUntilRebuffer$1) {\n return;\n }\n const switchCandidate = minRebufferMaxBandwidthSelector({\n main: this.vhs_.playlists.main,\n currentTime,\n bandwidth: measuredBandwidth,\n duration: this.duration_(),\n segmentDuration,\n timeUntilRebuffer: timeUntilRebuffer$1,\n currentTimeline: this.currentTimeline_,\n syncController: this.syncController_\n });\n if (!switchCandidate) {\n return;\n }\n const rebufferingImpact = requestTimeRemaining - 
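// [editorial note, not upstream code] seconds of stall expected if we keep downloading:\n // e.g. 6s of download remaining with only 4s of forward buffer gives an impact of 2s\n 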
timeUntilRebuffer$1;\n const timeSavedBySwitching = rebufferingImpact - switchCandidate.rebufferingImpact;\n let minimumTimeSaving = 0.5; // If we are already rebuffering, increase the amount of variance we add to the\n // potential round trip time of the new request so that we are not too aggressive\n // with switching to a playlist that might save us a fraction of a second.\n\n if (timeUntilRebuffer$1 <= TIME_FUDGE_FACTOR) {\n minimumTimeSaving = 1;\n }\n if (!switchCandidate.playlist || switchCandidate.playlist.uri === this.playlist_.uri || timeSavedBySwitching < minimumTimeSaving) {\n return;\n } // set the bandwidth to that of the desired playlist being sure to scale by\n // BANDWIDTH_VARIANCE and add one so the playlist selector does not exclude it\n // don't trigger a bandwidthupdate as the bandwidth is artificial\n\n this.bandwidth = switchCandidate.playlist.attributes.BANDWIDTH * Config.BANDWIDTH_VARIANCE + 1;\n this.trigger('earlyabort');\n }\n handleAbort_(segmentInfo) {\n this.logger_(`Aborting ${segmentInfoString(segmentInfo)}`);\n this.mediaRequestsAborted += 1;\n }\n /**\n * XHR `progress` event handler\n *\n * @param {Event} event\n * The XHR `progress` event\n * @param {Object} simpleSegment\n * A simplified segment object copy\n * @private\n */\n\n handleProgress_(event, simpleSegment) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n }\n this.trigger('progress');\n }\n handleTrackInfo_(simpleSegment, trackInfo) {\n const {\n hasAudio,\n hasVideo\n } = trackInfo;\n const metadata = {\n segmentInfo: segmentInfoPayload({\n type: this.loaderType_,\n segment: simpleSegment\n }),\n trackInfo: {\n hasAudio,\n hasVideo\n }\n };\n this.trigger({\n type: 'segmenttransmuxingtrackinfoavailable',\n metadata\n });\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n }\n if (this.checkForIllegalMediaSwitch(trackInfo)) {\n return;\n }\n trackInfo = trackInfo || {}; // When we have track info, determine what media types this loader is dealing with.\n // Guard against cases where we're not getting track info at all until we are\n // certain that all streams will provide it.\n\n if (!shallowEqual(this.currentMediaInfo_, trackInfo)) {\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n this.startingMediaInfo_ = trackInfo;\n this.currentMediaInfo_ = trackInfo;\n this.logger_('trackinfo update', trackInfo);\n this.trigger('trackinfo');\n } // trackinfo may cause an abort if the trackinfo\n // causes a codec change to an unsupported codec.\n\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n } // set trackinfo on the pending segment so that\n // it can append.\n\n this.pendingSegment_.trackInfo = trackInfo; // check if any calls were waiting on the track info\n\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n }\n handleTimingInfo_(simpleSegment, mediaType, timeType, time) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n }\n const segmentInfo = this.pendingSegment_;\n const timingInfoProperty = timingInfoPropertyForMedia(mediaType);\n segmentInfo[timingInfoProperty] = segmentInfo[timingInfoProperty] || {};\n segmentInfo[timingInfoProperty][timeType] = time;\n this.logger_(`timinginfo: ${mediaType} - ${timeType} - ${time}`); // check if any calls were waiting on the timing info\n\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n 
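// [editorial note, not upstream code] handlers like handleData_ park themselves on\n // callQueue_ until track and timing info arrive; once hasEnoughInfoToAppend_() is\n // satisfied, processCallQueue_() drains them in order, so appends never run ahead of\n // the metadata they depend on.\n 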
}\n handleCaptions_(simpleSegment, captionData) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n } // This could only happen with fmp4 segments, but\n // should still not happen in general\n\n if (captionData.length === 0) {\n this.logger_('SegmentLoader received no captions from a caption event');\n return;\n }\n const segmentInfo = this.pendingSegment_; // Wait until we have some video data so that caption timing\n // can be adjusted by the timestamp offset\n\n if (!segmentInfo.hasAppendedData_) {\n this.metadataQueue_.caption.push(this.handleCaptions_.bind(this, simpleSegment, captionData));\n return;\n }\n const timestampOffset = this.sourceUpdater_.videoTimestampOffset() === null ? this.sourceUpdater_.audioTimestampOffset() : this.sourceUpdater_.videoTimestampOffset();\n const captionTracks = {}; // get total start/end and captions for each track/stream\n\n captionData.forEach(caption => {\n // caption.stream is actually a track name...\n // set to the existing values in tracks or default values\n captionTracks[caption.stream] = captionTracks[caption.stream] || {\n // Infinity, as any other value will be less than this\n startTime: Infinity,\n captions: [],\n // 0, as any other value will be more than this\n endTime: 0\n };\n const captionTrack = captionTracks[caption.stream];\n captionTrack.startTime = Math.min(captionTrack.startTime, caption.startTime + timestampOffset);\n captionTrack.endTime = Math.max(captionTrack.endTime, caption.endTime + timestampOffset);\n captionTrack.captions.push(caption);\n });\n Object.keys(captionTracks).forEach(trackName => {\n const {\n startTime,\n endTime,\n captions\n } = captionTracks[trackName];\n const inbandTextTracks = this.inbandTextTracks_;\n this.logger_(`adding cues from ${startTime} -> ${endTime} for ${trackName}`);\n createCaptionsTrackIfNotExists(inbandTextTracks, this.vhs_.tech_, trackName); // clear out any cues that start and end at the same time period for the same track.\n // We do this because a rendition change that also changes the timescale for captions\n // will result in captions being re-parsed for certain segments. 
If we add them again\n // without clearing we will have two of the same captions visible.\n\n removeCuesFromTrack(startTime, endTime, inbandTextTracks[trackName]);\n addCaptionData({\n captionArray: captions,\n inbandTextTracks,\n timestampOffset\n });\n }); // Reset stored captions since we added parsed\n // captions to a text track at this point\n\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearParsedMp4Captions'\n });\n }\n }\n handleId3_(simpleSegment, id3Frames, dispatchType) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n }\n const segmentInfo = this.pendingSegment_; // we need to have appended data in order for the timestamp offset to be set\n\n if (!segmentInfo.hasAppendedData_) {\n this.metadataQueue_.id3.push(this.handleId3_.bind(this, simpleSegment, id3Frames, dispatchType));\n return;\n }\n this.addMetadataToTextTrack(dispatchType, id3Frames, this.duration_());\n }\n processMetadataQueue_() {\n this.metadataQueue_.id3.forEach(fn => fn());\n this.metadataQueue_.caption.forEach(fn => fn());\n this.metadataQueue_.id3 = [];\n this.metadataQueue_.caption = [];\n }\n processCallQueue_() {\n const callQueue = this.callQueue_; // Clear out the queue before the queued functions are run, since some of the\n // functions may check the length of the load queue and default to pushing themselves\n // back onto the queue.\n\n this.callQueue_ = [];\n callQueue.forEach(fun => fun());\n }\n processLoadQueue_() {\n const loadQueue = this.loadQueue_; // Clear out the queue before the queued functions are run, since some of the\n // functions may check the length of the load queue and default to pushing themselves\n // back onto the queue.\n\n this.loadQueue_ = [];\n loadQueue.forEach(fun => fun());\n }\n /**\n * Determines whether the loader has enough info to load the next segment.\n *\n * @return {boolean}\n * Whether or not the loader has enough info to load the next segment\n */\n\n hasEnoughInfoToLoad_() {\n // Since primary timing goes by video, only the audio loader potentially needs to wait\n // to load.\n if (this.loaderType_ !== 'audio') {\n return true;\n }\n const segmentInfo = this.pendingSegment_; // A fill buffer must have already run to establish a pending segment before there's\n // enough info to load.\n\n if (!segmentInfo) {\n return false;\n } // The first segment can and should be loaded immediately so that source buffers are\n // created together (before appending). Source buffer creation uses the presence of\n // audio and video data to determine whether to create audio/video source buffers, and\n // uses processed (transmuxed or parsed) media to determine the types required.\n\n if (!this.getCurrentMediaInfo_()) {\n return true;\n }\n if (\n // Technically, instead of waiting to load a segment on timeline changes, a segment\n // can be requested and downloaded and only wait before it is transmuxed or parsed.\n // But in practice, there are a few reasons why it is better to wait until a loader\n // is ready to append that segment before requesting and downloading:\n //\n // 1. Because audio and main loaders cross discontinuities together, if this loader\n // is waiting for the other to catch up, then instead of requesting another\n // segment and using up more bandwidth, by not yet loading, more bandwidth is\n // allotted to the loader currently behind.\n // 2. 
media-segment-request doesn't have to have logic to consider whether a segment\n // is ready to be processed or not, isolating the queueing behavior to the loader.\n // 3. The audio loader bases some of its segment properties on timing information\n // provided by the main loader, meaning that, if the logic for waiting on\n // processing was in media-segment-request, then it would also need to know how\n // to re-generate the segment information after the main loader caught up.\n shouldWaitForTimelineChange({\n timelineChangeController: this.timelineChangeController_,\n currentTimeline: this.currentTimeline_,\n segmentTimeline: segmentInfo.timeline,\n loaderType: this.loaderType_,\n audioDisabled: this.audioDisabled_\n })) {\n if (shouldFixBadTimelineChanges(this.timelineChangeController_)) {\n fixBadTimelineChange(this);\n }\n return false;\n }\n return true;\n }\n getCurrentMediaInfo_(segmentInfo = this.pendingSegment_) {\n return segmentInfo && segmentInfo.trackInfo || this.currentMediaInfo_;\n }\n getMediaInfo_(segmentInfo = this.pendingSegment_) {\n return this.getCurrentMediaInfo_(segmentInfo) || this.startingMediaInfo_;\n }\n getPendingSegmentPlaylist() {\n return this.pendingSegment_ ? this.pendingSegment_.playlist : null;\n }\n hasEnoughInfoToAppend_() {\n if (!this.sourceUpdater_.ready()) {\n return false;\n } // If content needs to be removed or the loader is waiting on an append reattempt,\n // then no additional content should be appended until the prior append is resolved.\n\n if (this.waitingOnRemove_ || this.quotaExceededErrorRetryTimeout_) {\n return false;\n }\n const segmentInfo = this.pendingSegment_;\n const trackInfo = this.getCurrentMediaInfo_(); // no segment to append any data for or\n // we do not have information on this specific\n // segment yet\n\n if (!segmentInfo || !trackInfo) {\n return false;\n }\n const {\n hasAudio,\n hasVideo,\n isMuxed\n } = trackInfo;\n if (hasVideo && !segmentInfo.videoTimingInfo) {\n return false;\n } // muxed content only relies on video timing information for now.\n\n if (hasAudio && !this.audioDisabled_ && !isMuxed && !segmentInfo.audioTimingInfo) {\n return false;\n } // we need to allow an append here even if we're moving to different timelines.\n\n if (shouldWaitForTimelineChange({\n timelineChangeController: this.timelineChangeController_,\n currentTimeline: this.currentTimeline_,\n segmentTimeline: segmentInfo.timeline,\n loaderType: this.loaderType_,\n audioDisabled: this.audioDisabled_\n })) {\n if (shouldFixBadTimelineChanges(this.timelineChangeController_)) {\n fixBadTimelineChange(this);\n }\n return false;\n }\n return true;\n }\n handleData_(simpleSegment, result) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n } // If there's anything in the call queue, then this data came later and should be\n // executed after the calls currently queued.\n\n if (this.callQueue_.length || !this.hasEnoughInfoToAppend_()) {\n this.callQueue_.push(this.handleData_.bind(this, simpleSegment, result));\n return;\n }\n const segmentInfo = this.pendingSegment_; // update the time mapping so we can translate from display time to media time\n\n this.setTimeMapping_(segmentInfo.timeline); // for tracking overall stats\n\n this.updateMediaSecondsLoaded_(segmentInfo.part || segmentInfo.segment); // Note that the state isn't changed from loading to appending. 
This is because abort\n // logic may change behavior depending on the state, and changing state too early may\n // inflate our estimates of bandwidth. In the future this should be re-examined to\n // note more granular states.\n // don't process and append data if the mediaSource is closed\n\n if (this.mediaSource_.readyState === 'closed') {\n return;\n } // if this request included an initialization segment, save that data\n // to the initSegment cache\n\n if (simpleSegment.map) {\n simpleSegment.map = this.initSegmentForMap(simpleSegment.map, true); // move over init segment properties to media request\n\n segmentInfo.segment.map = simpleSegment.map;\n } // if this request included a segment key, save that data in the cache\n\n if (simpleSegment.key) {\n this.segmentKey(simpleSegment.key, true);\n }\n segmentInfo.isFmp4 = simpleSegment.isFmp4;\n segmentInfo.timingInfo = segmentInfo.timingInfo || {};\n if (segmentInfo.isFmp4) {\n this.trigger('fmp4');\n segmentInfo.timingInfo.start = segmentInfo[timingInfoPropertyForMedia(result.type)].start;\n } else {\n const trackInfo = this.getCurrentMediaInfo_();\n const useVideoTimingInfo = this.loaderType_ === 'main' && trackInfo && trackInfo.hasVideo;\n let firstVideoFrameTimeForData;\n if (useVideoTimingInfo) {\n firstVideoFrameTimeForData = segmentInfo.videoTimingInfo.start;\n } // Segment loader knows more about segment timing than the transmuxer (in certain\n // aspects), so make any changes required for a more accurate start time.\n // Don't set the end time yet, as the segment may not be finished processing.\n\n segmentInfo.timingInfo.start = this.trueSegmentStart_({\n currentStart: segmentInfo.timingInfo.start,\n playlist: segmentInfo.playlist,\n mediaIndex: segmentInfo.mediaIndex,\n currentVideoTimestampOffset: this.sourceUpdater_.videoTimestampOffset(),\n useVideoTimingInfo,\n firstVideoFrameTimeForData,\n videoTimingInfo: segmentInfo.videoTimingInfo,\n audioTimingInfo: segmentInfo.audioTimingInfo\n });\n } // Init segments for audio and video only need to be appended in certain cases. 
Now\n // that data is about to be appended, we can check the final cases to determine\n // whether we should append an init segment.\n\n this.updateAppendInitSegmentStatus(segmentInfo, result.type); // Timestamp offset should be updated once we get new data and have its timing info,\n // as we use the start of the segment to offset the best guess (playlist provided)\n // timestamp offset.\n\n this.updateSourceBufferTimestampOffset_(segmentInfo); // if this is a sync request we need to determine whether it should\n // be appended or not.\n\n if (segmentInfo.isSyncRequest) {\n // first save/update our timing info for this segment.\n // this is what allows us to choose an accurate segment\n // and the main reason we make a sync request.\n this.updateTimingInfoEnd_(segmentInfo);\n this.syncController_.saveSegmentTimingInfo({\n segmentInfo,\n shouldSaveTimelineMapping: this.loaderType_ === 'main'\n });\n const next = this.chooseNextRequest_(); // If the sync request isn't the segment that would be requested next\n // after taking into account its timing info, do not append it.\n\n if (next.mediaIndex !== segmentInfo.mediaIndex || next.partIndex !== segmentInfo.partIndex) {\n this.logger_('sync segment was incorrect, not appending');\n return;\n } // otherwise append it like any other segment as our guess was correct.\n\n this.logger_('sync segment was correct, appending');\n } // Save some state so that in the future anything waiting on first append (and/or\n // timestamp offset(s)) can process immediately. While the extra state isn't optimal,\n // we need some notion of whether the timestamp offset or other relevant information\n // has had a chance to be set.\n\n segmentInfo.hasAppendedData_ = true; // Now that the timestamp offset should be set, we can append any waiting ID3 tags.\n\n this.processMetadataQueue_();\n this.appendData_(segmentInfo, result);\n }\n updateAppendInitSegmentStatus(segmentInfo, type) {\n // alt audio doesn't manage timestamp offset\n if (this.loaderType_ === 'main' && typeof segmentInfo.timestampOffset === 'number' &&\n // in the case that we're handling partial data, we don't want to append an init\n // segment for each chunk\n !segmentInfo.changedTimestampOffset) {\n // if the timestamp offset changed, the timeline may have changed, so we have to re-\n // append init segments\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n }\n if (this.playlistOfLastInitSegment_[type] !== segmentInfo.playlist) {\n // make sure we append init segment on playlist changes, in case the media config\n // changed\n this.appendInitSegment_[type] = true;\n }\n }\n getInitSegmentAndUpdateState_({\n type,\n initSegment,\n map,\n playlist\n }) {\n // \"The EXT-X-MAP tag specifies how to obtain the Media Initialization Section\n // (Section 3) required to parse the applicable Media Segments. 
It applies to every\n // Media Segment that appears after it in the Playlist until the next EXT-X-MAP tag\n // or until the end of the playlist.\"\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.2.5\n if (map) {\n const id = initSegmentId(map);\n if (this.activeInitSegmentId_ === id) {\n // don't need to re-append the init segment if the ID matches\n return null;\n } // a map-specified init segment takes priority over any transmuxed (or otherwise\n // obtained) init segment\n //\n // this also caches the init segment for later use\n\n initSegment = this.initSegmentForMap(map, true).bytes;\n this.activeInitSegmentId_ = id;\n } // We used to always prepend init segments for video, however, that shouldn't be\n // necessary. Instead, we should only append on changes, similar to what we've always\n // done for audio. This is more important (though may not be that important) for\n // frame-by-frame appending for LHLS, simply because of the increased quantity of\n // appends.\n\n if (initSegment && this.appendInitSegment_[type]) {\n // Make sure we track the playlist that we last used for the init segment, so that\n // we can re-append the init segment in the event that we get data from a new\n // playlist. Discontinuities and track changes are handled in other sections.\n this.playlistOfLastInitSegment_[type] = playlist; // Disable future init segment appends for this type. Until a change is necessary.\n\n this.appendInitSegment_[type] = false; // we need to clear out the fmp4 active init segment id, since\n // we are appending the muxer init segment\n\n this.activeInitSegmentId_ = null;\n return initSegment;\n }\n return null;\n }\n handleQuotaExceededError_({\n segmentInfo,\n type,\n bytes\n }, error) {\n const audioBuffered = this.sourceUpdater_.audioBuffered();\n const videoBuffered = this.sourceUpdater_.videoBuffered(); // For now we're ignoring any notion of gaps in the buffer, but they, in theory,\n // should be cleared out during the buffer removals. However, log in case it helps\n // debug.\n\n if (audioBuffered.length > 1) {\n this.logger_('On QUOTA_EXCEEDED_ERR, found gaps in the audio buffer: ' + timeRangesToArray(audioBuffered).join(', '));\n }\n if (videoBuffered.length > 1) {\n this.logger_('On QUOTA_EXCEEDED_ERR, found gaps in the video buffer: ' + timeRangesToArray(videoBuffered).join(', '));\n }\n const audioBufferStart = audioBuffered.length ? audioBuffered.start(0) : 0;\n const audioBufferEnd = audioBuffered.length ? audioBuffered.end(audioBuffered.length - 1) : 0;\n const videoBufferStart = videoBuffered.length ? videoBuffered.start(0) : 0;\n const videoBufferEnd = videoBuffered.length ? videoBuffered.end(videoBuffered.length - 1) : 0;\n if (audioBufferEnd - audioBufferStart <= MIN_BACK_BUFFER && videoBufferEnd - videoBufferStart <= MIN_BACK_BUFFER) {\n // Can't remove enough buffer to make room for new segment (or the browser doesn't\n // allow for appends of segments this size). In the future, it may be possible to\n // split up the segment and append in pieces, but for now, error out this playlist\n // in an attempt to switch to a more manageable rendition.\n this.logger_('On QUOTA_EXCEEDED_ERR, single segment too large to append to ' + 'buffer, triggering an error. 
' + `Appended byte length: ${bytes.byteLength}, ` + `audio buffer: ${timeRangesToArray(audioBuffered).join(', ')}, ` + `video buffer: ${timeRangesToArray(videoBuffered).join(', ')}, `);\n this.error({\n message: 'Quota exceeded error with append of a single segment of content',\n excludeUntil: Infinity\n });\n this.trigger('error');\n return;\n } // To try to resolve the quota exceeded error, clear back buffer and retry. This means\n // that the segment-loader should block on future events until this one is handled, so\n // that it doesn't keep moving onto further segments. Adding the call to the call\n // queue will prevent further appends until waitingOnRemove_ and\n // quotaExceededErrorRetryTimeout_ are cleared.\n //\n // Note that this will only block the current loader. In the case of demuxed content,\n // the other loader may keep filling as fast as possible. In practice, this should be\n // OK, as it is a rare case when either audio has a high enough bitrate to fill up a\n // source buffer, or video fills without enough room for audio to append (and without\n // the availability of clearing out seconds of back buffer to make room for audio).\n // But it might still be good to handle this case in the future as a TODO.\n\n this.waitingOnRemove_ = true;\n this.callQueue_.push(this.appendToSourceBuffer_.bind(this, {\n segmentInfo,\n type,\n bytes\n }));\n const currentTime = this.currentTime_(); // Try to remove as much audio and video as possible to make room for new content\n // before retrying.\n\n const timeToRemoveUntil = currentTime - MIN_BACK_BUFFER;\n this.logger_(`On QUOTA_EXCEEDED_ERR, removing audio/video from 0 to ${timeToRemoveUntil}`);\n this.remove(0, timeToRemoveUntil, () => {\n this.logger_(`On QUOTA_EXCEEDED_ERR, retrying append in ${MIN_BACK_BUFFER}s`);\n this.waitingOnRemove_ = false; // wait the length of time allotted in the back buffer to prevent wasted\n // attempts (since we can't clear less than the minimum)\n\n this.quotaExceededErrorRetryTimeout_ = window$1.setTimeout(() => {\n this.logger_('On QUOTA_EXCEEDED_ERR, re-processing call queue');\n this.quotaExceededErrorRetryTimeout_ = null;\n this.processCallQueue_();\n }, MIN_BACK_BUFFER * 1000);\n }, true);\n }\n handleAppendError_({\n segmentInfo,\n type,\n bytes\n }, error) {\n // if there's no error, nothing to do\n if (!error) {\n return;\n }\n if (error.code === QUOTA_EXCEEDED_ERR) {\n this.handleQuotaExceededError_({\n segmentInfo,\n type,\n bytes\n }); // A quota exceeded error should be recoverable with a future re-append, so no need\n // to trigger an append error.\n\n return;\n }\n this.logger_('Received non QUOTA_EXCEEDED_ERR on append', error); // If an append errors, we often can't recover.\n // (see https://w3c.github.io/media-source/#sourcebuffer-append-error).\n //\n // Trigger a special error so that it can be handled separately from normal,\n // recoverable errors.\n\n this.error({\n message: `${type} append of ${bytes.length}b failed for segment ` + `#${segmentInfo.mediaIndex} in playlist ${segmentInfo.playlist.id}`,\n metadata: {\n errorType: videojs.Error.StreamingFailedToAppendSegment\n }\n });\n this.trigger('appenderror');\n }\n appendToSourceBuffer_({\n segmentInfo,\n type,\n initSegment,\n data,\n bytes\n }) {\n // If this is a re-append, bytes were already created and don't need to be recreated\n if (!bytes) {\n const segments = [data];\n let byteLength = data.byteLength;\n if (initSegment) {\n // if the media initialization segment is changing, append it before the content\n // 
segment\n segments.unshift(initSegment);\n byteLength += initSegment.byteLength;\n } // Technically we should be OK appending the init segment separately, however, we\n // haven't yet tested that, and prepending is how we have always done things.\n\n bytes = concatSegments({\n bytes: byteLength,\n segments\n });\n }\n const metadata = {\n segmentInfo: segmentInfoPayload({\n type: this.loaderType_,\n segment: segmentInfo\n })\n };\n this.trigger({\n type: 'segmentappendstart',\n metadata\n });\n this.sourceUpdater_.appendBuffer({\n segmentInfo,\n type,\n bytes\n }, this.handleAppendError_.bind(this, {\n segmentInfo,\n type,\n bytes\n }));\n }\n handleSegmentTimingInfo_(type, requestId, segmentTimingInfo) {\n if (!this.pendingSegment_ || requestId !== this.pendingSegment_.requestId) {\n return;\n }\n const segment = this.pendingSegment_.segment;\n const timingInfoProperty = `${type}TimingInfo`;\n if (!segment[timingInfoProperty]) {\n segment[timingInfoProperty] = {};\n }\n segment[timingInfoProperty].transmuxerPrependedSeconds = segmentTimingInfo.prependedContentDuration || 0;\n segment[timingInfoProperty].transmuxedPresentationStart = segmentTimingInfo.start.presentation;\n segment[timingInfoProperty].transmuxedDecodeStart = segmentTimingInfo.start.decode;\n segment[timingInfoProperty].transmuxedPresentationEnd = segmentTimingInfo.end.presentation;\n segment[timingInfoProperty].transmuxedDecodeEnd = segmentTimingInfo.end.decode; // mainly used as a reference for debugging\n\n segment[timingInfoProperty].baseMediaDecodeTime = segmentTimingInfo.baseMediaDecodeTime;\n }\n appendData_(segmentInfo, result) {\n const {\n type,\n data\n } = result;\n if (!data || !data.byteLength) {\n return;\n }\n if (type === 'audio' && this.audioDisabled_) {\n return;\n }\n const initSegment = this.getInitSegmentAndUpdateState_({\n type,\n initSegment: result.initSegment,\n playlist: segmentInfo.playlist,\n map: segmentInfo.isFmp4 ? 
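// [editorial note, not upstream code] only fmp4 passes its EXT-X-MAP init segment\n // through here; for TS, the init data comes from the transmuxer's result instead\n 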
segmentInfo.segment.map : null\n });\n this.appendToSourceBuffer_({\n segmentInfo,\n type,\n initSegment,\n data\n });\n }\n /**\n * load a specific segment from a request into the buffer\n *\n * @private\n */\n\n loadSegment_(segmentInfo) {\n this.state = 'WAITING';\n this.pendingSegment_ = segmentInfo;\n this.trimBackBuffer_(segmentInfo);\n if (typeof segmentInfo.timestampOffset === 'number') {\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearAllMp4Captions'\n });\n }\n }\n if (!this.hasEnoughInfoToLoad_()) {\n this.loadQueue_.push(() => {\n // regenerate the audioAppendStart, timestampOffset, etc as they\n // may have changed since this function was added to the queue.\n const options = _extends({}, segmentInfo, {\n forceTimestampOffset: true\n });\n _extends(segmentInfo, this.generateSegmentInfo_(options));\n this.isPendingTimestampOffset_ = false;\n this.updateTransmuxerAndRequestSegment_(segmentInfo);\n });\n return;\n }\n this.updateTransmuxerAndRequestSegment_(segmentInfo);\n }\n updateTransmuxerAndRequestSegment_(segmentInfo) {\n // We'll update the source buffer's timestamp offset once we have transmuxed data, but\n // the transmuxer still needs to be updated before then.\n //\n // Even though keepOriginalTimestamps is set to true for the transmuxer, timestamp\n // offset must be passed to the transmuxer for stream correcting adjustments.\n if (this.shouldUpdateTransmuxerTimestampOffset_(segmentInfo.timestampOffset)) {\n this.gopBuffer_.length = 0; // gopsToAlignWith was set before the GOP buffer was cleared\n\n segmentInfo.gopsToAlignWith = [];\n this.timeMapping_ = 0; // reset values in the transmuxer since a discontinuity should start fresh\n\n this.transmuxer_.postMessage({\n action: 'reset'\n });\n this.transmuxer_.postMessage({\n action: 'setTimestampOffset',\n timestampOffset: segmentInfo.timestampOffset\n });\n }\n const simpleSegment = this.createSimplifiedSegmentObj_(segmentInfo);\n const isEndOfStream = this.isEndOfStream_(segmentInfo.mediaIndex, segmentInfo.playlist, segmentInfo.partIndex);\n const isWalkingForward = this.mediaIndex !== null;\n const isDiscontinuity = segmentInfo.timeline !== this.currentTimeline_ &&\n // currentTimeline starts at -1, so we shouldn't end the timeline switching to 0,\n // the first timeline\n segmentInfo.timeline > 0;\n const isEndOfTimeline = isEndOfStream || isWalkingForward && isDiscontinuity;\n this.logger_(`Requesting\n${compactSegmentUrlDescription(segmentInfo.uri)}\n${segmentInfoString(segmentInfo)}`); // If there's an init segment associated with this segment, but it is not cached (identified by a lack of bytes),\n // then this init segment has never been seen before and should be appended.\n //\n // At this point the content type (audio/video or both) is not yet known, but it should be safe to set\n // both to true and leave the decision of whether to append the init segment to append time.\n\n if (simpleSegment.map && !simpleSegment.map.bytes) {\n this.logger_('going to request init segment.');\n this.appendInitSegment_ = {\n video: true,\n audio: true\n };\n }\n segmentInfo.abortRequests = mediaSegmentRequest({\n xhr: this.vhs_.xhr,\n xhrOptions: this.xhrOptions_,\n decryptionWorker: this.decrypter_,\n segment: simpleSegment,\n abortFn: this.handleAbort_.bind(this, segmentInfo),\n progressFn: this.handleProgress_.bind(this),\n trackInfoFn: this.handleTrackInfo_.bind(this),\n timingInfoFn: this.handleTimingInfo_.bind(this),\n videoSegmentTimingInfoFn: this.handleSegmentTimingInfo_.bind(this, 'video', 
segmentInfo.requestId),\n audioSegmentTimingInfoFn: this.handleSegmentTimingInfo_.bind(this, 'audio', segmentInfo.requestId),\n captionsFn: this.handleCaptions_.bind(this),\n isEndOfTimeline,\n endedTimelineFn: () => {\n this.logger_('received endedtimeline callback');\n },\n id3Fn: this.handleId3_.bind(this),\n dataFn: this.handleData_.bind(this),\n doneFn: this.segmentRequestFinished_.bind(this),\n onTransmuxerLog: ({\n message,\n level,\n stream\n }) => {\n this.logger_(`${segmentInfoString(segmentInfo)} logged from transmuxer stream ${stream} as a ${level}: ${message}`);\n },\n triggerSegmentEventFn: ({\n type,\n segment,\n keyInfo,\n trackInfo,\n timingInfo\n }) => {\n const segInfo = segmentInfoPayload({\n segment\n });\n const metadata = {\n segmentInfo: segInfo\n }; // add other properties if necessary.\n\n if (keyInfo) {\n metadata.keyInfo = keyInfo;\n }\n if (trackInfo) {\n metadata.trackInfo = trackInfo;\n }\n if (timingInfo) {\n metadata.timingInfo = timingInfo;\n }\n this.trigger({\n type,\n metadata\n });\n }\n });\n }\n /**\n * trim the back buffer so that we don't have too much data\n * in the source buffer\n *\n * @private\n *\n * @param {Object} segmentInfo - the current segment\n */\n\n trimBackBuffer_(segmentInfo) {\n const removeToTime = safeBackBufferTrimTime(this.seekable_(), this.currentTime_(), this.playlist_.targetDuration || 10); // Chrome has a hard limit of 150MB of\n // buffer and a very conservative \"garbage collector\"\n // We manually clear out the old buffer to ensure\n // we don't trigger the QuotaExceeded error\n // on the source buffer during subsequent appends\n\n if (removeToTime > 0) {\n this.remove(0, removeToTime);\n }\n }\n /**\n * create a simplified copy of the segment object with just the\n * information necessary to perform the XHR and decryption\n *\n * @private\n *\n * @param {Object} segmentInfo - the current segment\n * @return {Object} a simplified segment object copy\n */\n\n createSimplifiedSegmentObj_(segmentInfo) {\n const segment = segmentInfo.segment;\n const part = segmentInfo.part;\n const isEncrypted = segmentInfo.segment.key || segmentInfo.segment.map && segmentInfo.segment.map.key;\n const isMediaInitialization = segmentInfo.segment.map && !segmentInfo.segment.map.bytes;\n const simpleSegment = {\n resolvedUri: part ? part.resolvedUri : segment.resolvedUri,\n byterange: part ? part.byterange : segment.byterange,\n requestId: segmentInfo.requestId,\n transmuxer: segmentInfo.transmuxer,\n audioAppendStart: segmentInfo.audioAppendStart,\n gopsToAlignWith: segmentInfo.gopsToAlignWith,\n part: segmentInfo.part,\n type: this.loaderType_,\n start: segmentInfo.startOfSegment,\n duration: segmentInfo.duration,\n isEncrypted,\n isMediaInitialization\n };\n const previousSegment = segmentInfo.playlist.segments[segmentInfo.mediaIndex - 1];\n if (previousSegment && previousSegment.timeline === segment.timeline) {\n // The baseStartTime of a segment is used to handle rollover when probing the TS\n // segment to retrieve timing information. Since the probe only looks at the media's\n // times (e.g., PTS and DTS values of the segment), and doesn't consider the\n // player's time (e.g., player.currentTime()), baseStartTime should reflect the\n // media time as well. transmuxedDecodeEnd represents the end time of a segment, in\n // seconds of media time, so should be used here. 
The previous segment is used since\n // the end of the previous segment should represent the beginning of the current\n // segment, so long as they are on the same timeline.\n if (previousSegment.videoTimingInfo) {\n simpleSegment.baseStartTime = previousSegment.videoTimingInfo.transmuxedDecodeEnd;\n } else if (previousSegment.audioTimingInfo) {\n simpleSegment.baseStartTime = previousSegment.audioTimingInfo.transmuxedDecodeEnd;\n }\n }\n if (segment.key) {\n // if the media sequence is greater than 2^32, the IV will be incorrect\n // assuming 10s segments, that would be about 1300 years\n const iv = segment.key.iv || new Uint32Array([0, 0, 0, segmentInfo.mediaIndex + segmentInfo.playlist.mediaSequence]);\n simpleSegment.key = this.segmentKey(segment.key);\n simpleSegment.key.iv = iv;\n }\n if (segment.map) {\n simpleSegment.map = this.initSegmentForMap(segment.map);\n }\n return simpleSegment;\n }\n saveTransferStats_(stats) {\n // every request counts as a media request even if it has been aborted\n // or canceled due to a timeout\n this.mediaRequests += 1;\n if (stats) {\n this.mediaBytesTransferred += stats.bytesReceived;\n this.mediaTransferDuration += stats.roundTripTime;\n }\n }\n saveBandwidthRelatedStats_(duration, stats) {\n // byteLength will be used for throughput, and should be based on bytes received,\n // which we only know at the end of the request and should reflect total bytes\n // downloaded rather than just bytes processed from components of the segment\n this.pendingSegment_.byteLength = stats.bytesReceived;\n if (duration < MIN_SEGMENT_DURATION_TO_SAVE_STATS) {\n this.logger_(`Ignoring segment's bandwidth because its duration of ${duration}` + ` is less than the min to record ${MIN_SEGMENT_DURATION_TO_SAVE_STATS}`);\n return;\n }\n const metadata = {\n bandwidthInfo: {\n from: this.bandwidth,\n to: stats.bandwidth\n }\n }; // player event with payload\n\n this.trigger({\n type: 'bandwidthupdated',\n metadata\n });\n this.bandwidth = stats.bandwidth;\n this.roundTrip = stats.roundTripTime;\n }\n handleTimeout_() {\n // although the VTT segment loader bandwidth isn't really used, it's good to\n // maintain functionality between segment loaders\n this.mediaRequestsTimedout += 1;\n this.bandwidth = 1;\n this.roundTrip = NaN;\n this.trigger('bandwidthupdate');\n this.trigger('timeout');\n }\n /**\n * Handle the callback from the segmentRequest function and set the\n * associated SegmentLoader state and errors if necessary\n *\n * @private\n */\n\n segmentRequestFinished_(error, simpleSegment, result) {\n // TODO handle special cases, e.g., muxed audio/video but only audio in the segment\n // check the call queue directly since this function doesn't need to deal with any\n // data, and can continue even if the source buffers are not set up and we didn't get\n // any data from the segment\n if (this.callQueue_.length) {\n this.callQueue_.push(this.segmentRequestFinished_.bind(this, error, simpleSegment, result));\n return;\n }\n this.saveTransferStats_(simpleSegment.stats); // The request was aborted and the SegmentLoader has already been reset\n\n if (!this.pendingSegment_) {\n return;\n } // the request was aborted and the SegmentLoader has already started\n // another request. 
this can happen when the timeout for an aborted\n // request triggers due to a limitation in the XHR library\n // do not count this as any sort of request or we risk double-counting\n\n if (simpleSegment.requestId !== this.pendingSegment_.requestId) {\n return;\n } // an error occurred from the active pendingSegment_ so reset everything\n\n if (error) {\n this.pendingSegment_ = null;\n this.state = 'READY'; // aborts are not a true error condition and nothing corrective needs to be done\n\n if (error.code === REQUEST_ERRORS.ABORTED) {\n return;\n }\n this.pause(); // the error is really just that at least one of the requests timed-out\n // set the bandwidth to a very low value and trigger an ABR switch to\n // take emergency action\n\n if (error.code === REQUEST_ERRORS.TIMEOUT) {\n this.handleTimeout_();\n return;\n } // if control-flow has arrived here, then the error is real\n // emit an error event to exclude the current playlist\n\n this.mediaRequestsErrored += 1;\n this.error(error);\n this.trigger('error');\n return;\n }\n const segmentInfo = this.pendingSegment_; // the response was a success so set any bandwidth stats the request\n // generated for ABR purposes\n\n this.saveBandwidthRelatedStats_(segmentInfo.duration, simpleSegment.stats);\n segmentInfo.endOfAllRequests = simpleSegment.endOfAllRequests;\n if (result.gopInfo) {\n this.gopBuffer_ = updateGopBuffer(this.gopBuffer_, result.gopInfo, this.safeAppend_);\n } // Although we may have already started appending on progress, we shouldn't switch the\n // state away from loading until we are officially done loading the segment data.\n\n this.state = 'APPENDING'; // used for testing\n\n this.trigger('appending');\n this.waitForAppendsToComplete_(segmentInfo);\n }\n setTimeMapping_(timeline) {\n const timelineMapping = this.syncController_.mappingForTimeline(timeline);\n if (timelineMapping !== null) {\n this.timeMapping_ = timelineMapping;\n }\n }\n updateMediaSecondsLoaded_(segment) {\n if (typeof segment.start === 'number' && typeof segment.end === 'number') {\n this.mediaSecondsLoaded += segment.end - segment.start;\n } else {\n this.mediaSecondsLoaded += segment.duration;\n }\n }\n shouldUpdateTransmuxerTimestampOffset_(timestampOffset) {\n if (timestampOffset === null) {\n return false;\n } // note that we're potentially using the same timestamp offset for both video and\n // audio\n\n if (this.loaderType_ === 'main' && timestampOffset !== this.sourceUpdater_.videoTimestampOffset()) {\n return true;\n }\n if (!this.audioDisabled_ && timestampOffset !== this.sourceUpdater_.audioTimestampOffset()) {\n return true;\n }\n return false;\n }\n trueSegmentStart_({\n currentStart,\n playlist,\n mediaIndex,\n firstVideoFrameTimeForData,\n currentVideoTimestampOffset,\n useVideoTimingInfo,\n videoTimingInfo,\n audioTimingInfo\n }) {\n if (typeof currentStart !== 'undefined') {\n // if start was set once, keep using it\n return currentStart;\n }\n if (!useVideoTimingInfo) {\n return audioTimingInfo.start;\n }\n const previousSegment = playlist.segments[mediaIndex - 1]; // The start of a segment should be the start of the first full frame contained\n // within that segment. Since the transmuxer maintains a cache of incomplete data\n // from and/or the last frame seen, the start time may reflect a frame that starts\n // in the previous segment. 
Check for that case and ensure the start time is\n // accurate for the segment.\n\n if (mediaIndex === 0 || !previousSegment || typeof previousSegment.start === 'undefined' || previousSegment.end !== firstVideoFrameTimeForData + currentVideoTimestampOffset) {\n return firstVideoFrameTimeForData;\n }\n return videoTimingInfo.start;\n }\n waitForAppendsToComplete_(segmentInfo) {\n const trackInfo = this.getCurrentMediaInfo_(segmentInfo);\n if (!trackInfo) {\n this.error({\n message: 'No starting media returned, likely due to an unsupported media format.',\n playlistExclusionDuration: Infinity\n });\n this.trigger('error');\n return;\n } // Although transmuxing is done, appends may not yet be finished. Throw a marker\n // on each queue this loader is responsible for to ensure that the appends are\n // complete.\n\n const {\n hasAudio,\n hasVideo,\n isMuxed\n } = trackInfo;\n const waitForVideo = this.loaderType_ === 'main' && hasVideo;\n const waitForAudio = !this.audioDisabled_ && hasAudio && !isMuxed;\n segmentInfo.waitingOnAppends = 0; // segments with no data\n\n if (!segmentInfo.hasAppendedData_) {\n if (!segmentInfo.timingInfo && typeof segmentInfo.timestampOffset === 'number') {\n // When there's no audio or video data in the segment, there's no audio or video\n // timing information.\n //\n // If there's no audio or video timing information, then the timestamp offset\n // can't be adjusted to the appropriate value for the transmuxer and source\n // buffers.\n //\n // Therefore, the next segment should be used to set the timestamp offset.\n this.isPendingTimestampOffset_ = true;\n } // override settings for metadata only segments\n\n segmentInfo.timingInfo = {\n start: 0\n };\n segmentInfo.waitingOnAppends++;\n if (!this.isPendingTimestampOffset_) {\n // update the timestampoffset\n this.updateSourceBufferTimestampOffset_(segmentInfo); // make sure the metadata queue is processed even though we have\n // no video/audio data.\n\n this.processMetadataQueue_();\n } // append is \"done\" instantly with no data.\n\n this.checkAppendsDone_(segmentInfo);\n return;\n } // Since source updater could call back synchronously, do the increments first.\n\n if (waitForVideo) {\n segmentInfo.waitingOnAppends++;\n }\n if (waitForAudio) {\n segmentInfo.waitingOnAppends++;\n }\n if (waitForVideo) {\n this.sourceUpdater_.videoQueueCallback(this.checkAppendsDone_.bind(this, segmentInfo));\n }\n if (waitForAudio) {\n this.sourceUpdater_.audioQueueCallback(this.checkAppendsDone_.bind(this, segmentInfo));\n }\n }\n checkAppendsDone_(segmentInfo) {\n if (this.checkForAbort_(segmentInfo.requestId)) {\n return;\n }\n segmentInfo.waitingOnAppends--;\n if (segmentInfo.waitingOnAppends === 0) {\n this.handleAppendsDone_();\n }\n }\n checkForIllegalMediaSwitch(trackInfo) {\n const illegalMediaSwitchError = illegalMediaSwitch(this.loaderType_, this.getCurrentMediaInfo_(), trackInfo);\n if (illegalMediaSwitchError) {\n this.error({\n message: illegalMediaSwitchError,\n playlistExclusionDuration: Infinity\n });\n this.trigger('error');\n return true;\n }\n return false;\n }\n updateSourceBufferTimestampOffset_(segmentInfo) {\n if (segmentInfo.timestampOffset === null ||\n // we don't yet have the start for whatever media type (video or audio) has\n // priority, timing-wise, so we must wait\n typeof segmentInfo.timingInfo.start !== 'number' ||\n // already updated the timestamp offset for this segment\n segmentInfo.changedTimestampOffset ||\n // the alt audio loader should not be responsible for setting the timestamp 
offset\n this.loaderType_ !== 'main') {\n return;\n }\n let didChange = false; // Primary timing goes by video, and audio is trimmed in the transmuxer, meaning that\n // the timing info here comes from video. In the event that the audio is longer than\n // the video, this will trim the start of the audio.\n // This also trims any offset from 0 at the beginning of the media\n\n segmentInfo.timestampOffset -= this.getSegmentStartTimeForTimestampOffsetCalculation_({\n videoTimingInfo: segmentInfo.segment.videoTimingInfo,\n audioTimingInfo: segmentInfo.segment.audioTimingInfo,\n timingInfo: segmentInfo.timingInfo\n }); // In the event that there are part segment downloads, each will try to update the\n // timestamp offset. Retaining this bit of state prevents us from updating in the\n // future (within the same segment), however, there may be a better way to handle it.\n\n segmentInfo.changedTimestampOffset = true;\n if (segmentInfo.timestampOffset !== this.sourceUpdater_.videoTimestampOffset()) {\n this.sourceUpdater_.videoTimestampOffset(segmentInfo.timestampOffset);\n didChange = true;\n }\n if (segmentInfo.timestampOffset !== this.sourceUpdater_.audioTimestampOffset()) {\n this.sourceUpdater_.audioTimestampOffset(segmentInfo.timestampOffset);\n didChange = true;\n }\n if (didChange) {\n this.trigger('timestampoffset');\n }\n }\n getSegmentStartTimeForTimestampOffsetCalculation_({\n videoTimingInfo,\n audioTimingInfo,\n timingInfo\n }) {\n if (!this.useDtsForTimestampOffset_) {\n return timingInfo.start;\n }\n if (videoTimingInfo && typeof videoTimingInfo.transmuxedDecodeStart === 'number') {\n return videoTimingInfo.transmuxedDecodeStart;\n } // handle audio only\n\n if (audioTimingInfo && typeof audioTimingInfo.transmuxedDecodeStart === 'number') {\n return audioTimingInfo.transmuxedDecodeStart;\n } // handle content not transmuxed (e.g., MP4)\n\n return timingInfo.start;\n }\n updateTimingInfoEnd_(segmentInfo) {\n segmentInfo.timingInfo = segmentInfo.timingInfo || {};\n const trackInfo = this.getMediaInfo_();\n const useVideoTimingInfo = this.loaderType_ === 'main' && trackInfo && trackInfo.hasVideo;\n const prioritizedTimingInfo = useVideoTimingInfo && segmentInfo.videoTimingInfo ? segmentInfo.videoTimingInfo : segmentInfo.audioTimingInfo;\n if (!prioritizedTimingInfo) {\n return;\n }\n segmentInfo.timingInfo.end = typeof prioritizedTimingInfo.end === 'number' ?\n // End time may not exist in a case where we aren't parsing the full segment (one\n // current example is the case of fmp4), so use the rough duration to calculate an\n // end time.\n prioritizedTimingInfo.end : prioritizedTimingInfo.start + segmentInfo.duration;\n }\n /**\n * callback to run when appendBuffer is finished. 
detects if we are\n * in a good state to do things with the data we got, or if we need\n * to wait for more\n *\n * @private\n */\n\n handleAppendsDone_() {\n // appendsdone can cause an abort\n if (this.pendingSegment_) {\n const metadata = {\n segmentInfo: segmentInfoPayload({\n type: this.loaderType_,\n segment: this.pendingSegment_\n })\n };\n this.trigger({\n type: 'appendsdone',\n metadata\n });\n }\n if (!this.pendingSegment_) {\n this.state = 'READY'; // TODO should this move into this.checkForAbort to speed up requests post abort in\n // all appending cases?\n\n if (!this.paused()) {\n this.monitorBuffer_();\n }\n return;\n }\n const segmentInfo = this.pendingSegment_;\n if (segmentInfo.part && segmentInfo.part.syncInfo) {\n // low-latency flow\n segmentInfo.part.syncInfo.markAppended();\n } else if (segmentInfo.segment.syncInfo) {\n // normal flow\n segmentInfo.segment.syncInfo.markAppended();\n } // Now that the end of the segment has been reached, we can set the end time. It's\n // best to wait until all appends are done so we're sure that the primary media is\n // finished (and we have its end time).\n\n this.updateTimingInfoEnd_(segmentInfo);\n if (this.shouldSaveSegmentTimingInfo_) {\n // Timeline mappings should only be saved for the main loader. This is for multiple\n // reasons:\n //\n // 1) Only one mapping is saved per timeline, meaning that if both the audio loader\n // and the main loader try to save the timeline mapping, whichever comes later\n // will overwrite the first. In theory this is OK, as the mappings should be the\n // same, however, it breaks for (2)\n // 2) In the event of a live stream, the initial live point will make for a somewhat\n // arbitrary mapping. If audio and video streams are not perfectly in-sync, then\n // the mapping will be off for one of the streams, dependent on which one was\n // first saved (see (1)).\n // 3) Primary timing goes by video in VHS, so the mapping should be video.\n //\n // Since the audio loader will wait for the main loader to load the first segment,\n // the main loader will save the first timeline mapping, and ensure that there won't\n // be a case where audio loads two segments without saving a mapping (thus leading\n // to missing segment timing info).\n this.syncController_.saveSegmentTimingInfo({\n segmentInfo,\n shouldSaveTimelineMapping: this.loaderType_ === 'main'\n });\n }\n const segmentDurationMessage = getTroublesomeSegmentDurationMessage(segmentInfo, this.sourceType_);\n if (segmentDurationMessage) {\n if (segmentDurationMessage.severity === 'warn') {\n videojs.log.warn(segmentDurationMessage.message);\n } else {\n this.logger_(segmentDurationMessage.message);\n }\n }\n this.recordThroughput_(segmentInfo);\n this.pendingSegment_ = null;\n this.state = 'READY';\n if (segmentInfo.isSyncRequest) {\n this.trigger('syncinfoupdate'); // if the sync request was not appended\n // then it was not the correct segment.\n // throw it away and use the data it gave us\n // to get the correct one.\n\n if (!segmentInfo.hasAppendedData_) {\n this.logger_(`Throwing away un-appended sync request ${segmentInfoString(segmentInfo)}`);\n return;\n }\n }\n this.logger_(`Appended ${segmentInfoString(segmentInfo)}`);\n this.addSegmentMetadataCue_(segmentInfo);\n this.fetchAtBuffer_ = true;\n if (this.currentTimeline_ !== segmentInfo.timeline) {\n this.timelineChangeController_.lastTimelineChange({\n type: this.loaderType_,\n from: this.currentTimeline_,\n to: segmentInfo.timeline\n }); // If audio is not disabled, the main segment 
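// Worked example for the badSegmentGuess/badPartGuess checks below: with
// targetDuration = 10 and a previously appended segment ending at t = 10,
// a currentTime_() of 50 gives 50 - 10 = 40 > 3 * 10, so the conservative
// guess is judged wrong and the loader calls resetEverything() to rebuild
// its sync point instead of walking forward from stale state.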
loader is responsible for updating\n // the audio timeline as well. If the content is video only, this won't have any\n // impact.\n\n if (this.loaderType_ === 'main' && !this.audioDisabled_) {\n this.timelineChangeController_.lastTimelineChange({\n type: 'audio',\n from: this.currentTimeline_,\n to: segmentInfo.timeline\n });\n }\n }\n this.currentTimeline_ = segmentInfo.timeline; // We must update the syncinfo to recalculate the seekable range before\n // the following conditional otherwise it may consider this a bad \"guess\"\n // and attempt to resync when the post-update seekable window and live\n // point would mean that this was the perfect segment to fetch\n\n this.trigger('syncinfoupdate');\n const segment = segmentInfo.segment;\n const part = segmentInfo.part;\n const badSegmentGuess = segment.end && this.currentTime_() - segment.end > segmentInfo.playlist.targetDuration * 3;\n const badPartGuess = part && part.end && this.currentTime_() - part.end > segmentInfo.playlist.partTargetDuration * 3; // If we previously appended a segment/part that ends more than 3 part/targetDurations before\n // the currentTime_ that means that our conservative guess was too conservative.\n // In that case, reset the loader state so that we try to use any information gained\n // from the previous request to create a new, more accurate, sync-point.\n\n if (badSegmentGuess || badPartGuess) {\n this.logger_(`bad ${badSegmentGuess ? 'segment' : 'part'} ${segmentInfoString(segmentInfo)}`);\n this.resetEverything();\n return;\n }\n const isWalkingForward = this.mediaIndex !== null; // Don't do a rendition switch unless we have enough time to get a sync segment\n // and conservatively guess\n\n if (isWalkingForward) {\n this.trigger('bandwidthupdate');\n }\n this.trigger('progress');\n this.mediaIndex = segmentInfo.mediaIndex;\n this.partIndex = segmentInfo.partIndex; // any time an update finishes and the last segment is in the\n // buffer, end the stream. this ensures the \"ended\" event will\n // fire if playback reaches that point.\n\n if (this.isEndOfStream_(segmentInfo.mediaIndex, segmentInfo.playlist, segmentInfo.partIndex)) {\n this.endOfStream();\n } // used for testing\n\n this.trigger('appended');\n if (segmentInfo.hasAppendedData_) {\n this.mediaAppends++;\n }\n if (!this.paused()) {\n this.monitorBuffer_();\n }\n }\n /**\n * Records the current throughput of the decrypt, transmux, and append\n * portion of the segment pipeline. `throughput.rate` is the cumulative\n * moving average of the throughput. 
`throughput.count` is the number of\n * data points in the average.\n *\n * @private\n * @param {Object} segmentInfo the object returned by loadSegment\n */\n\n recordThroughput_(segmentInfo) {\n if (segmentInfo.duration < MIN_SEGMENT_DURATION_TO_SAVE_STATS) {\n this.logger_(`Ignoring segment's throughput because its duration of ${segmentInfo.duration}` + ` is less than the min to record ${MIN_SEGMENT_DURATION_TO_SAVE_STATS}`);\n return;\n }\n const rate = this.throughput.rate; // Add one to the time to ensure that we don't accidentally attempt to divide\n // by zero in the case where the throughput is ridiculously high\n\n const segmentProcessingTime = Date.now() - segmentInfo.endOfAllRequests + 1; // Multiply by 8000 to convert from bytes/millisecond to bits/second\n\n const segmentProcessingThroughput = Math.floor(segmentInfo.byteLength / segmentProcessingTime * 8 * 1000); // This is just a cumulative moving average calculation:\n // newAvg = oldAvg + (sample - oldAvg) / (sampleCount + 1)\n\n this.throughput.rate += (segmentProcessingThroughput - rate) / ++this.throughput.count;\n }\n /**\n * Adds a cue to the segment-metadata track with some metadata information about the\n * segment\n *\n * @private\n * @param {Object} segmentInfo\n * the object returned by loadSegment\n * @method addSegmentMetadataCue_\n */\n\n addSegmentMetadataCue_(segmentInfo) {\n if (!this.segmentMetadataTrack_) {\n return;\n }\n const segment = segmentInfo.segment;\n const start = segment.start;\n const end = segment.end; // Do not try adding the cue if the start and end times are invalid.\n\n if (!finite(start) || !finite(end)) {\n return;\n }\n removeCuesFromTrack(start, end, this.segmentMetadataTrack_);\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n const value = {\n custom: segment.custom,\n dateTimeObject: segment.dateTimeObject,\n dateTimeString: segment.dateTimeString,\n programDateTime: segment.programDateTime,\n bandwidth: segmentInfo.playlist.attributes.BANDWIDTH,\n resolution: segmentInfo.playlist.attributes.RESOLUTION,\n codecs: segmentInfo.playlist.attributes.CODECS,\n byteLength: segmentInfo.byteLength,\n uri: segmentInfo.uri,\n timeline: segmentInfo.timeline,\n playlist: segmentInfo.playlist.id,\n start,\n end\n };\n const data = JSON.stringify(value);\n const cue = new Cue(start, end, data); // Attach the metadata to the value property of the cue to keep consistency between\n // the differences of WebKitDataCue in safari and VTTCue in other browsers\n\n cue.value = value;\n this.segmentMetadataTrack_.addCue(cue);\n }\n}\nfunction noop() {}\nconst toTitleCase = function (string) {\n if (typeof string !== 'string') {\n return string;\n }\n return string.replace(/./, w => w.toUpperCase());\n};\n\n/**\n * @file source-updater.js\n */\nconst bufferTypes = ['video', 'audio'];\nconst updating = (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`];\n return sourceBuffer && sourceBuffer.updating || sourceUpdater.queuePending[type];\n};\nconst nextQueueIndexOfType = (type, queue) => {\n for (let i = 0; i < queue.length; i++) {\n const queueEntry = queue[i];\n if (queueEntry.type === 'mediaSource') {\n // If the next entry is a media source entry (uses multiple source buffers), block\n // processing to allow it to go through first.\n return null;\n }\n if (queueEntry.type === type) {\n return i;\n }\n }\n return null;\n};\nconst shiftQueue = (type, sourceUpdater) => {\n if (sourceUpdater.queue.length === 0) {\n return;\n }\n let queueIndex = 0;\n let queueEntry = 
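// Worked example of the cumulative moving average used by recordThroughput_
// above (newAvg = oldAvg + (sample - oldAvg) / (sampleCount + 1)): samples of
// 8e6, 6e6, 10e6 bits/second average to 8e6, then 7e6, then 8e6. A minimal
// standalone tracker with the same update rule (hypothetical names):
function makeThroughputTracker() {
  const throughput = { rate: 0, count: 0 };
  return {
    record(byteLength, processingTimeMs) {
      // + 1 guards against divide-by-zero; * 8 * 1000 converts bytes/ms to bits/s
      const sample = Math.floor(byteLength / (processingTimeMs + 1) * 8 * 1000);
      throughput.rate += (sample - throughput.rate) / ++throughput.count;
    },
    rate: () => throughput.rate
  };
}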
sourceUpdater.queue[queueIndex];\n if (queueEntry.type === 'mediaSource') {\n if (!sourceUpdater.updating() && sourceUpdater.mediaSource.readyState !== 'closed') {\n sourceUpdater.queue.shift();\n queueEntry.action(sourceUpdater);\n if (queueEntry.doneFn) {\n queueEntry.doneFn();\n } // Only specific source buffer actions must wait for async updateend events. Media\n // Source actions process synchronously. Therefore, both audio and video source\n // buffers are now clear to process the next queue entries.\n\n shiftQueue('audio', sourceUpdater);\n shiftQueue('video', sourceUpdater);\n } // Media Source actions require both source buffers, so if the media source action\n // couldn't process yet (because one or both source buffers are busy), block other\n // queue actions until both are available and the media source action can process.\n\n return;\n }\n if (type === 'mediaSource') {\n // If the queue was shifted by a media source action (this happens when pushing a\n // media source action onto the queue), then it wasn't from an updateend event from an\n // audio or video source buffer, so there's no change from previous state, and no\n // processing should be done.\n return;\n } // Media source queue entries don't need to consider whether the source updater is\n // started (i.e., source buffers are created) as they don't need the source buffers, but\n // source buffer queue entries do.\n\n if (!sourceUpdater.ready() || sourceUpdater.mediaSource.readyState === 'closed' || updating(type, sourceUpdater)) {\n return;\n }\n if (queueEntry.type !== type) {\n queueIndex = nextQueueIndexOfType(type, sourceUpdater.queue);\n if (queueIndex === null) {\n // Either there's no queue entry that uses this source buffer type in the queue, or\n // there's a media source queue entry before the next entry of this type, in which\n // case wait for that action to process first.\n return;\n }\n queueEntry = sourceUpdater.queue[queueIndex];\n }\n sourceUpdater.queue.splice(queueIndex, 1); // Keep a record that this source buffer type is in use.\n //\n // The queue pending operation must be set before the action is performed in the event\n // that the action results in a synchronous event that is acted upon. 
For instance, if\n // an exception is thrown that can be handled, it's possible that new actions will be\n // appended to an empty queue and immediately executed, but would not have the correct\n // pending information if this property was set after the action was performed.\n\n sourceUpdater.queuePending[type] = queueEntry;\n queueEntry.action(type, sourceUpdater);\n if (!queueEntry.doneFn) {\n // synchronous operation, process next entry\n sourceUpdater.queuePending[type] = null;\n shiftQueue(type, sourceUpdater);\n return;\n }\n};\nconst cleanupBuffer = (type, sourceUpdater) => {\n const buffer = sourceUpdater[`${type}Buffer`];\n const titleType = toTitleCase(type);\n if (!buffer) {\n return;\n }\n buffer.removeEventListener('updateend', sourceUpdater[`on${titleType}UpdateEnd_`]);\n buffer.removeEventListener('error', sourceUpdater[`on${titleType}Error_`]);\n sourceUpdater.codecs[type] = null;\n sourceUpdater[`${type}Buffer`] = null;\n};\nconst inSourceBuffers = (mediaSource, sourceBuffer) => mediaSource && sourceBuffer && Array.prototype.indexOf.call(mediaSource.sourceBuffers, sourceBuffer) !== -1;\nconst actions = {\n appendBuffer: (bytes, segmentInfo, onError) => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Appending segment ${segmentInfo.mediaIndex}'s ${bytes.length} bytes to ${type}Buffer`);\n try {\n sourceBuffer.appendBuffer(bytes);\n } catch (e) {\n sourceUpdater.logger_(`Error with code ${e.code} ` + (e.code === QUOTA_EXCEEDED_ERR ? '(QUOTA_EXCEEDED_ERR) ' : '') + `when appending segment ${segmentInfo.mediaIndex} to ${type}Buffer`);\n sourceUpdater.queuePending[type] = null;\n onError(e);\n }\n },\n remove: (start, end) => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Removing ${start} to ${end} from ${type}Buffer`);\n try {\n sourceBuffer.remove(start, end);\n } catch (e) {\n sourceUpdater.logger_(`Remove ${start} to ${end} from ${type}Buffer failed`);\n }\n },\n timestampOffset: offset => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Setting ${type}timestampOffset to ${offset}`);\n sourceBuffer.timestampOffset = offset;\n },\n callback: callback => (type, sourceUpdater) => {\n callback();\n },\n endOfStream: error => sourceUpdater => {\n if (sourceUpdater.mediaSource.readyState !== 'open') {\n return;\n }\n sourceUpdater.logger_(`Calling mediaSource endOfStream(${error || ''})`);\n try {\n sourceUpdater.mediaSource.endOfStream(error);\n } catch (e) {\n videojs.log.warn('Failed to call media source endOfStream', e);\n }\n },\n duration: duration => sourceUpdater => {\n sourceUpdater.logger_(`Setting mediaSource duration to ${duration}`);\n try {\n sourceUpdater.mediaSource.duration = duration;\n } catch (e) {\n videojs.log.warn('Failed to set media source duration', e);\n }\n 
},\n abort: () => (type, sourceUpdater) => {\n if (sourceUpdater.mediaSource.readyState !== 'open') {\n return;\n }\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`calling abort on ${type}Buffer`);\n try {\n sourceBuffer.abort();\n } catch (e) {\n videojs.log.warn(`Failed to abort on ${type}Buffer`, e);\n }\n },\n addSourceBuffer: (type, codec) => sourceUpdater => {\n const titleType = toTitleCase(type);\n const mime = getMimeForCodec(codec);\n sourceUpdater.logger_(`Adding ${type}Buffer with codec ${codec} to mediaSource`);\n const sourceBuffer = sourceUpdater.mediaSource.addSourceBuffer(mime);\n sourceBuffer.addEventListener('updateend', sourceUpdater[`on${titleType}UpdateEnd_`]);\n sourceBuffer.addEventListener('error', sourceUpdater[`on${titleType}Error_`]);\n sourceUpdater.codecs[type] = codec;\n sourceUpdater[`${type}Buffer`] = sourceBuffer;\n },\n removeSourceBuffer: type => sourceUpdater => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`];\n cleanupBuffer(type, sourceUpdater); // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Removing ${type}Buffer with codec ${sourceUpdater.codecs[type]} from mediaSource`);\n try {\n sourceUpdater.mediaSource.removeSourceBuffer(sourceBuffer);\n } catch (e) {\n videojs.log.warn(`Failed to removeSourceBuffer ${type}Buffer`, e);\n }\n },\n changeType: codec => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`];\n const mime = getMimeForCodec(codec); // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n } // do not update codec if we don't need to.\n // Only update if we change the codec base.\n // For example, going from avc1.640028 to avc1.64001f does not require a changeType call.\n\n const newCodecBase = codec.substring(0, codec.indexOf('.'));\n const oldCodec = sourceUpdater.codecs[type];\n const oldCodecBase = oldCodec.substring(0, oldCodec.indexOf('.'));\n if (oldCodecBase === newCodecBase) {\n return;\n }\n const metadata = {\n codecsChangeInfo: {\n from: oldCodec,\n to: codec\n }\n };\n sourceUpdater.trigger({\n type: 'codecschange',\n metadata\n });\n sourceUpdater.logger_(`changing ${type}Buffer codec from ${oldCodec} to ${codec}`); // check if change to the provided type is supported\n\n try {\n sourceBuffer.changeType(mime);\n sourceUpdater.codecs[type] = codec;\n } catch (e) {\n metadata.errorType = videojs.Error.StreamingCodecsChangeError;\n metadata.error = e;\n e.metadata = metadata;\n sourceUpdater.error_ = e;\n sourceUpdater.trigger('error');\n videojs.log.warn(`Failed to changeType on ${type}Buffer`, e);\n }\n }\n};\nconst pushQueue = ({\n type,\n sourceUpdater,\n action,\n doneFn,\n name\n}) => {\n sourceUpdater.queue.push({\n type,\n action,\n doneFn,\n name\n });\n shiftQueue(type, sourceUpdater);\n};\nconst onUpdateend = (type, sourceUpdater) => e => {\n // Although there should, in theory, be a pending action for any updateend received,\n // there are some actions that may trigger updateend events without set 
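// Stripped-down model of the actions/pushQueue machinery above (hypothetical,
// no MSE involved): every mutation is wrapped in an action closure, runs only
// when its queue is idle, and an async action holds the queue until it
// settles -- the role the 'updateend' event plays for real SourceBuffers.
function makeSerialQueue() {
  const queue = [];
  let busy = false;
  const shift = () => {
    if (busy || queue.length === 0) {
      return;
    }
    busy = true;
    const { action, doneFn } = queue.shift();
    action(() => { // the action signals completion, like an updateend event
      busy = false;
      if (doneFn) {
        doneFn();
      }
      shift(); // process the next entry
    });
  };
  return {
    push(action, doneFn) {
      queue.push({ action, doneFn });
      shift();
    }
  };
}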
definitions in\n // the w3c spec. For instance, setting the duration on the media source may trigger\n // updateend events on source buffers. This does not appear to be in the spec. As such,\n // if we encounter an updateend without a corresponding pending action from our queue\n // for that source buffer type, process the next action.\n const bufferedRangesForType = sourceUpdater[`${type}Buffered`]();\n const descriptiveString = bufferedRangesToString(bufferedRangesForType);\n sourceUpdater.logger_(`received \"updateend\" event for ${type} Source Buffer: `, descriptiveString);\n if (sourceUpdater.queuePending[type]) {\n const doneFn = sourceUpdater.queuePending[type].doneFn;\n sourceUpdater.queuePending[type] = null;\n if (doneFn) {\n // if there's an error, report it\n doneFn(sourceUpdater[`${type}Error_`]);\n }\n }\n shiftQueue(type, sourceUpdater);\n};\n/**\n * A queue of callbacks to be serialized and applied when a\n * MediaSource and its associated SourceBuffers are not in the\n * updating state. It is used by the segment loader to update the\n * underlying SourceBuffers when new data is loaded, for instance.\n *\n * @class SourceUpdater\n * @param {MediaSource} mediaSource the MediaSource to create the SourceBuffer from\n * @param {string} mimeType the desired MIME type of the underlying SourceBuffer\n */\n\nclass SourceUpdater extends videojs.EventTarget {\n constructor(mediaSource) {\n super();\n this.mediaSource = mediaSource;\n this.sourceopenListener_ = () => shiftQueue('mediaSource', this);\n this.mediaSource.addEventListener('sourceopen', this.sourceopenListener_);\n this.logger_ = logger('SourceUpdater'); // initial timestamp offset is 0\n\n this.audioTimestampOffset_ = 0;\n this.videoTimestampOffset_ = 0;\n this.queue = [];\n this.queuePending = {\n audio: null,\n video: null\n };\n this.delayedAudioAppendQueue_ = [];\n this.videoAppendQueued_ = false;\n this.codecs = {};\n this.onVideoUpdateEnd_ = onUpdateend('video', this);\n this.onAudioUpdateEnd_ = onUpdateend('audio', this);\n this.onVideoError_ = e => {\n // used for debugging\n this.videoError_ = e;\n };\n this.onAudioError_ = e => {\n // used for debugging\n this.audioError_ = e;\n };\n this.createdSourceBuffers_ = false;\n this.initializedEme_ = false;\n this.triggeredReady_ = false;\n }\n initializedEme() {\n this.initializedEme_ = true;\n this.triggerReady();\n }\n hasCreatedSourceBuffers() {\n // if false, likely waiting on one of the segment loaders to get enough data to create\n // source buffers\n return this.createdSourceBuffers_;\n }\n hasInitializedAnyEme() {\n return this.initializedEme_;\n }\n ready() {\n return this.hasCreatedSourceBuffers() && this.hasInitializedAnyEme();\n }\n createSourceBuffers(codecs) {\n if (this.hasCreatedSourceBuffers()) {\n // already created them before\n return;\n } // the initial addOrChangeSourceBuffers will always be\n // two add buffers.\n\n this.addOrChangeSourceBuffers(codecs);\n this.createdSourceBuffers_ = true;\n this.trigger('createdsourcebuffers');\n this.triggerReady();\n }\n triggerReady() {\n // only allow ready to be triggered once, this prevents the case\n // where:\n // 1. we trigger createdsourcebuffers\n // 2. IE 11 synchronously initializes EME\n // 3. the synchronous initialization causes us to trigger ready\n // 4. 
We go back to the ready check in createSourceBuffers and ready is triggered again.\n if (this.ready() && !this.triggeredReady_) {\n this.triggeredReady_ = true;\n this.trigger('ready');\n }\n }\n /**\n * Add a type of source buffer to the media source.\n *\n * @param {string} type\n * The type of source buffer to add.\n *\n * @param {string} codec\n * The codec to add the source buffer with.\n */\n\n addSourceBuffer(type, codec) {\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.addSourceBuffer(type, codec),\n name: 'addSourceBuffer'\n });\n }\n /**\n * call abort on a source buffer.\n *\n * @param {string} type\n * The type of source buffer to call abort on.\n */\n\n abort(type) {\n pushQueue({\n type,\n sourceUpdater: this,\n action: actions.abort(type),\n name: 'abort'\n });\n }\n /**\n * Call removeSourceBuffer and remove a specific type\n * of source buffer on the mediaSource.\n *\n * @param {string} type\n * The type of source buffer to remove.\n */\n\n removeSourceBuffer(type) {\n if (!this.canRemoveSourceBuffer()) {\n videojs.log.error('removeSourceBuffer is not supported!');\n return;\n }\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.removeSourceBuffer(type),\n name: 'removeSourceBuffer'\n });\n }\n /**\n * Whether or not the removeSourceBuffer function is supported\n * on the mediaSource.\n *\n * @return {boolean}\n * if removeSourceBuffer can be called.\n */\n\n canRemoveSourceBuffer() {\n // As of Firefox 83 removeSourceBuffer\n // throws errors, so we report that it does not support this.\n return !videojs.browser.IS_FIREFOX && window$1.MediaSource && window$1.MediaSource.prototype && typeof window$1.MediaSource.prototype.removeSourceBuffer === 'function';\n }\n /**\n * Whether or not the changeType function is supported\n * on our SourceBuffers.\n *\n * @return {boolean}\n * if changeType can be called.\n */\n\n static canChangeType() {\n return window$1.SourceBuffer && window$1.SourceBuffer.prototype && typeof window$1.SourceBuffer.prototype.changeType === 'function';\n }\n /**\n * Whether or not the changeType function is supported\n * on our SourceBuffers.\n *\n * @return {boolean}\n * if changeType can be called.\n */\n\n canChangeType() {\n return this.constructor.canChangeType();\n }\n /**\n * Call the changeType function on a source buffer, given the code and type.\n *\n * @param {string} type\n * The type of source buffer to call changeType on.\n *\n * @param {string} codec\n * The codec string to change type with on the source buffer.\n */\n\n changeType(type, codec) {\n if (!this.canChangeType()) {\n videojs.log.error('changeType is not supported!');\n return;\n }\n pushQueue({\n type,\n sourceUpdater: this,\n action: actions.changeType(codec),\n name: 'changeType'\n });\n }\n /**\n * Add source buffers with a codec or, if they are already created,\n * call changeType on source buffers using changeType.\n *\n * @param {Object} codecs\n * Codecs to switch to\n */\n\n addOrChangeSourceBuffers(codecs) {\n if (!codecs || typeof codecs !== 'object' || Object.keys(codecs).length === 0) {\n throw new Error('Cannot addOrChangeSourceBuffers to undefined codecs');\n }\n Object.keys(codecs).forEach(type => {\n const codec = codecs[type];\n if (!this.hasCreatedSourceBuffers()) {\n return this.addSourceBuffer(type, codec);\n }\n if (this.canChangeType()) {\n this.changeType(type, codec);\n }\n });\n }\n /**\n * Queue an update to append an ArrayBuffer.\n *\n * @param {MediaObject} object containing audioBytes and/or 
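// The triggerReady/triggeredReady_ logic above reduced to a one-shot gate
// over two async preconditions (hypothetical names): whichever of "source
// buffers created" and "EME initialized" completes last fires ready, and the
// fired flag keeps a synchronous EME init from firing it twice.
function makeReadyGate(onReady) {
  const done = { buffers: false, eme: false };
  let fired = false;
  return key => {
    done[key] = true;
    if (!fired && done.buffers && done.eme) {
      fired = true;
      onReady();
    }
  };
}
// const gate = makeReadyGate(() => console.log('ready'));
// gate('eme'); gate('buffers'); // fires once
// gate('buffers');              // ignored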
videoBytes\n * @param {Function} done the function to call when done\n * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-appendBuffer-void-ArrayBuffer-data\n */\n\n appendBuffer(options, doneFn) {\n const {\n segmentInfo,\n type,\n bytes\n } = options;\n this.processedAppend_ = true;\n if (type === 'audio' && this.videoBuffer && !this.videoAppendQueued_) {\n this.delayedAudioAppendQueue_.push([options, doneFn]);\n this.logger_(`delayed audio append of ${bytes.length} until video append`);\n return;\n } // In the case of certain errors, for instance, QUOTA_EXCEEDED_ERR, updateend will\n // not be fired. This means that the queue will be blocked until the next action\n // taken by the segment-loader. Provide a mechanism for segment-loader to handle\n // these errors by calling the doneFn with the specific error.\n\n const onError = doneFn;\n pushQueue({\n type,\n sourceUpdater: this,\n action: actions.appendBuffer(bytes, segmentInfo || {\n mediaIndex: -1\n }, onError),\n doneFn,\n name: 'appendBuffer'\n });\n if (type === 'video') {\n this.videoAppendQueued_ = true;\n if (!this.delayedAudioAppendQueue_.length) {\n return;\n }\n const queue = this.delayedAudioAppendQueue_.slice();\n this.logger_(`queuing delayed audio ${queue.length} appendBuffers`);\n this.delayedAudioAppendQueue_.length = 0;\n queue.forEach(que => {\n this.appendBuffer.apply(this, que);\n });\n }\n }\n /**\n * Get the audio buffer's buffered timerange.\n *\n * @return {TimeRange}\n * The audio buffer's buffered time range\n */\n\n audioBuffered() {\n // no media source/source buffer or it isn't in the media sources\n // source buffer list\n if (!inSourceBuffers(this.mediaSource, this.audioBuffer)) {\n return createTimeRanges();\n }\n return this.audioBuffer.buffered ? this.audioBuffer.buffered : createTimeRanges();\n }\n /**\n * Get the video buffer's buffered timerange.\n *\n * @return {TimeRange}\n * The video buffer's buffered time range\n */\n\n videoBuffered() {\n // no media source/source buffer or it isn't in the media sources\n // source buffer list\n if (!inSourceBuffers(this.mediaSource, this.videoBuffer)) {\n return createTimeRanges();\n }\n return this.videoBuffer.buffered ? this.videoBuffer.buffered : createTimeRanges();\n }\n /**\n * Get a combined video/audio buffer's buffered timerange.\n *\n * @return {TimeRange}\n * the combined time range\n */\n\n buffered() {\n const video = inSourceBuffers(this.mediaSource, this.videoBuffer) ? this.videoBuffer : null;\n const audio = inSourceBuffers(this.mediaSource, this.audioBuffer) ? this.audioBuffer : null;\n if (audio && !video) {\n return this.audioBuffered();\n }\n if (video && !audio) {\n return this.videoBuffered();\n }\n return bufferIntersection(this.audioBuffered(), this.videoBuffered());\n }\n /**\n * Add a callback to the queue that will set duration on the mediaSource.\n *\n * @param {number} duration\n * The duration to set\n *\n * @param {Function} [doneFn]\n * function to run after duration has been set.\n */\n\n setDuration(duration, doneFn = noop) {\n // In order to set the duration on the media source, it's necessary to wait for all\n // source buffers to no longer be updating. 
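// Reduced model of the delayed-audio logic in appendBuffer above
// (hypothetical names): audio appends are held until the first video append
// so that video, which has timing priority, establishes the timestamp offset
// before any audio reaches the source buffer.
function makeAppendGate(appendFn) {
  const delayedAudio = [];
  let videoQueued = false;
  return (type, bytes) => {
    if (type === 'audio' && !videoQueued) {
      delayedAudio.push(bytes); // hold audio until video leads
      return;
    }
    appendFn(type, bytes);
    if (type === 'video' && !videoQueued) {
      videoQueued = true;
      // flush the held audio appends now that video has gone first
      delayedAudio.splice(0).forEach(audioBytes => appendFn('audio', audioBytes));
    }
  };
}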
\"If the updating attribute equals true on\n // any SourceBuffer in sourceBuffers, then throw an InvalidStateError exception and\n // abort these steps.\" (source: https://www.w3.org/TR/media-source/#attributes).\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.duration(duration),\n name: 'duration',\n doneFn\n });\n }\n /**\n * Add a mediaSource endOfStream call to the queue\n *\n * @param {Error} [error]\n * Call endOfStream with an error\n *\n * @param {Function} [doneFn]\n * A function that should be called when the\n * endOfStream call has finished.\n */\n\n endOfStream(error = null, doneFn = noop) {\n if (typeof error !== 'string') {\n error = undefined;\n } // In order to set the duration on the media source, it's necessary to wait for all\n // source buffers to no longer be updating. \"If the updating attribute equals true on\n // any SourceBuffer in sourceBuffers, then throw an InvalidStateError exception and\n // abort these steps.\" (source: https://www.w3.org/TR/media-source/#attributes).\n\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.endOfStream(error),\n name: 'endOfStream',\n doneFn\n });\n }\n /**\n * Queue an update to remove a time range from the buffer.\n *\n * @param {number} start where to start the removal\n * @param {number} end where to end the removal\n * @param {Function} [done=noop] optional callback to be executed when the remove\n * operation is complete\n * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end\n */\n\n removeAudio(start, end, done = noop) {\n if (!this.audioBuffered().length || this.audioBuffered().end(0) === 0) {\n done();\n return;\n }\n pushQueue({\n type: 'audio',\n sourceUpdater: this,\n action: actions.remove(start, end),\n doneFn: done,\n name: 'remove'\n });\n }\n /**\n * Queue an update to remove a time range from the buffer.\n *\n * @param {number} start where to start the removal\n * @param {number} end where to end the removal\n * @param {Function} [done=noop] optional callback to be executed when the remove\n * operation is complete\n * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end\n */\n\n removeVideo(start, end, done = noop) {\n if (!this.videoBuffered().length || this.videoBuffered().end(0) === 0) {\n done();\n return;\n }\n pushQueue({\n type: 'video',\n sourceUpdater: this,\n action: actions.remove(start, end),\n doneFn: done,\n name: 'remove'\n });\n }\n /**\n * Whether the underlying sourceBuffer is updating or not\n *\n * @return {boolean} the updating status of the SourceBuffer\n */\n\n updating() {\n // the audio/video source buffer is updating\n if (updating('audio', this) || updating('video', this)) {\n return true;\n }\n return false;\n }\n /**\n * Set/get the timestampoffset on the audio SourceBuffer\n *\n * @return {number} the timestamp offset\n */\n\n audioTimestampOffset(offset) {\n if (typeof offset !== 'undefined' && this.audioBuffer &&\n // no point in updating if it's the same\n this.audioTimestampOffset_ !== offset) {\n pushQueue({\n type: 'audio',\n sourceUpdater: this,\n action: actions.timestampOffset(offset),\n name: 'timestampOffset'\n });\n this.audioTimestampOffset_ = offset;\n }\n return this.audioTimestampOffset_;\n }\n /**\n * Set/get the timestampoffset on the video SourceBuffer\n *\n * @return {number} the timestamp offset\n */\n\n videoTimestampOffset(offset) {\n if (typeof offset !== 'undefined' && this.videoBuffer &&\n 
// no point in updating if it's the same\n this.videoTimestampOffset_ !== offset) {\n pushQueue({\n type: 'video',\n sourceUpdater: this,\n action: actions.timestampOffset(offset),\n name: 'timestampOffset'\n });\n this.videoTimestampOffset_ = offset;\n }\n return this.videoTimestampOffset_;\n }\n /**\n * Add a function to the queue that will be called\n * when it is its turn to run in the audio queue.\n *\n * @param {Function} callback\n * The callback to queue.\n */\n\n audioQueueCallback(callback) {\n if (!this.audioBuffer) {\n return;\n }\n pushQueue({\n type: 'audio',\n sourceUpdater: this,\n action: actions.callback(callback),\n name: 'callback'\n });\n }\n /**\n * Add a function to the queue that will be called\n * when it is its turn to run in the video queue.\n *\n * @param {Function} callback\n * The callback to queue.\n */\n\n videoQueueCallback(callback) {\n if (!this.videoBuffer) {\n return;\n }\n pushQueue({\n type: 'video',\n sourceUpdater: this,\n action: actions.callback(callback),\n name: 'callback'\n });\n }\n /**\n * dispose of the source updater and the underlying sourceBuffer\n */\n\n dispose() {\n this.trigger('dispose');\n bufferTypes.forEach(type => {\n this.abort(type);\n if (this.canRemoveSourceBuffer()) {\n this.removeSourceBuffer(type);\n } else {\n this[`${type}QueueCallback`](() => cleanupBuffer(type, this));\n }\n });\n this.videoAppendQueued_ = false;\n this.delayedAudioAppendQueue_.length = 0;\n if (this.sourceopenListener_) {\n this.mediaSource.removeEventListener('sourceopen', this.sourceopenListener_);\n }\n this.off();\n }\n}\nconst uint8ToUtf8 = uintArray => decodeURIComponent(escape(String.fromCharCode.apply(null, uintArray)));\nconst bufferToHexString = buffer => {\n const uInt8Buffer = new Uint8Array(buffer);\n return Array.from(uInt8Buffer).map(byte => byte.toString(16).padStart(2, '0')).join('');\n};\n\n/**\n * @file vtt-segment-loader.js\n */\nconst VTT_LINE_TERMINATORS = new Uint8Array('\\n\\n'.split('').map(char => char.charCodeAt(0)));\nclass NoVttJsError extends Error {\n constructor() {\n super('Trying to parse received VTT cues, but there is no WebVTT. Make sure vtt.js is loaded.');\n }\n}\n/**\n * An object that manages segment loading and appending.\n *\n * @class VTTSegmentLoader\n * @param {Object} options required and optional options\n * @extends videojs.EventTarget\n */\n\nclass VTTSegmentLoader extends SegmentLoader {\n constructor(settings, options = {}) {\n super(settings, options); // SegmentLoader requires a MediaSource be specified or it will throw an error;\n // however, VTTSegmentLoader has no need of a media source, so delete the reference\n\n this.mediaSource_ = null;\n this.subtitlesTrack_ = null;\n this.featuresNativeTextTracks_ = settings.featuresNativeTextTracks;\n this.loadVttJs = settings.loadVttJs; // The VTT segment will have its own time mappings. 
Saving VTT segment timing info in\n // the sync controller leads to improper behavior.\n\n this.shouldSaveSegmentTimingInfo_ = false;\n }\n createTransmuxer_() {\n // don't need to transmux any subtitles\n return null;\n }\n /**\n * Indicates which time ranges are buffered\n *\n * @return {TimeRange}\n * TimeRange object representing the current buffered ranges\n */\n\n buffered_() {\n if (!this.subtitlesTrack_ || !this.subtitlesTrack_.cues || !this.subtitlesTrack_.cues.length) {\n return createTimeRanges();\n }\n const cues = this.subtitlesTrack_.cues;\n const start = cues[0].startTime;\n const end = cues[cues.length - 1].startTime;\n return createTimeRanges([[start, end]]);\n }\n /**\n * Gets and sets init segment for the provided map\n *\n * @param {Object} map\n * The map object representing the init segment to get or set\n * @param {boolean=} set\n * If true, the init segment for the provided map should be saved\n * @return {Object}\n * map object for desired init segment\n */\n\n initSegmentForMap(map, set = false) {\n if (!map) {\n return null;\n }\n const id = initSegmentId(map);\n let storedMap = this.initSegments_[id];\n if (set && !storedMap && map.bytes) {\n // append WebVTT line terminators to the media initialization segment if it exists\n // to follow the WebVTT spec (https://w3c.github.io/webvtt/#file-structure) that\n // requires two or more WebVTT line terminators between the WebVTT header and the\n // rest of the file\n const combinedByteLength = VTT_LINE_TERMINATORS.byteLength + map.bytes.byteLength;\n const combinedSegment = new Uint8Array(combinedByteLength);\n combinedSegment.set(map.bytes);\n combinedSegment.set(VTT_LINE_TERMINATORS, map.bytes.byteLength);\n this.initSegments_[id] = storedMap = {\n resolvedUri: map.resolvedUri,\n byterange: map.byterange,\n bytes: combinedSegment\n };\n }\n return storedMap || map;\n }\n /**\n * Returns true if all configuration required for loading is present, otherwise false.\n *\n * @return {boolean} True if the all configuration is ready for loading\n * @private\n */\n\n couldBeginLoading_() {\n return this.playlist_ && this.subtitlesTrack_ && !this.paused();\n }\n /**\n * Once all the starting parameters have been specified, begin\n * operation. 
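// The byte concatenation from initSegmentForMap above in isolation
// (hypothetical helper name): the WebVTT spec
// (https://w3c.github.io/webvtt/#file-structure) requires two or more line
// terminators between the header and the rest of the file, so two '\n'
// bytes are appended to the init segment.
const LINE_TERMINATORS = new Uint8Array('\n\n'.split('').map(char => char.charCodeAt(0)));
function appendVttTerminators(headerBytes) {
  const combined = new Uint8Array(headerBytes.byteLength + LINE_TERMINATORS.byteLength);
  combined.set(headerBytes); // WEBVTT header first
  combined.set(LINE_TERMINATORS, headerBytes.byteLength); // then the blank line
  return combined;
}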
This method should only be invoked from the INIT\n * state.\n *\n * @private\n */\n\n init_() {\n this.state = 'READY';\n this.resetEverything();\n return this.monitorBuffer_();\n }\n /**\n * Set a subtitle track on the segment loader to add subtitles to\n *\n * @param {TextTrack=} track\n * The text track to add loaded subtitles to\n * @return {TextTrack}\n * Returns the subtitles track\n */\n\n track(track) {\n if (typeof track === 'undefined') {\n return this.subtitlesTrack_;\n }\n this.subtitlesTrack_ = track; // if we were unpaused but waiting for a sourceUpdater, start\n // buffering now\n\n if (this.state === 'INIT' && this.couldBeginLoading_()) {\n this.init_();\n }\n return this.subtitlesTrack_;\n }\n /**\n * Remove any data in the source buffer between start and end times\n *\n * @param {number} start - the start time of the region to remove from the buffer\n * @param {number} end - the end time of the region to remove from the buffer\n */\n\n remove(start, end) {\n removeCuesFromTrack(start, end, this.subtitlesTrack_);\n }\n /**\n * fill the buffer with segments unless the sourceBuffers are\n * currently updating\n *\n * Note: this function should only ever be called by monitorBuffer_\n * and never directly\n *\n * @private\n */\n\n fillBuffer_() {\n // see if we need to begin loading immediately\n const segmentInfo = this.chooseNextRequest_();\n if (!segmentInfo) {\n return;\n }\n if (this.syncController_.timestampOffsetForTimeline(segmentInfo.timeline) === null) {\n // We don't have the timestamp offset that we need to sync subtitles.\n // Rerun on a timestamp offset or user interaction.\n const checkTimestampOffset = () => {\n this.state = 'READY';\n if (!this.paused()) {\n // if not paused, queue a buffer check as soon as possible\n this.monitorBuffer_();\n }\n };\n this.syncController_.one('timestampoffset', checkTimestampOffset);\n this.state = 'WAITING_ON_TIMELINE';\n return;\n }\n this.loadSegment_(segmentInfo);\n } // never set a timestamp offset for vtt segments.\n\n timestampOffsetForSegment_() {\n return null;\n }\n chooseNextRequest_() {\n return this.skipEmptySegments_(super.chooseNextRequest_());\n }\n /**\n * Prevents the segment loader from requesting segments we know contain no subtitles\n * by walking forward until we find the next segment that is not already known to\n * be empty.\n *\n * @param {Object} segmentInfo\n * a segment info object that describes the current segment\n * @return {Object}\n * a segment info object that describes the current segment\n */\n\n skipEmptySegments_(segmentInfo) {\n while (segmentInfo && segmentInfo.segment.empty) {\n // stop at the last possible segmentInfo\n if (segmentInfo.mediaIndex + 1 >= segmentInfo.playlist.segments.length) {\n segmentInfo = null;\n break;\n }\n segmentInfo = this.generateSegmentInfo_({\n playlist: segmentInfo.playlist,\n mediaIndex: segmentInfo.mediaIndex + 1,\n startOfSegment: segmentInfo.startOfSegment + segmentInfo.duration,\n isSyncRequest: segmentInfo.isSyncRequest\n });\n }\n return segmentInfo;\n }\n stopForError(error) {\n this.error(error);\n this.state = 'READY';\n this.pause();\n this.trigger('error');\n }\n /**\n * append a decrypted segment to the SourceBuffer through a SourceUpdater\n *\n * @private\n */\n\n segmentRequestFinished_(error, simpleSegment, result) {\n if (!this.subtitlesTrack_) {\n this.state = 'READY';\n return;\n }\n this.saveTransferStats_(simpleSegment.stats); // the request was aborted\n\n if (!this.pendingSegment_) {\n this.state = 'READY';\n 
this.mediaRequestsAborted += 1;\n return;\n }\n if (error) {\n if (error.code === REQUEST_ERRORS.TIMEOUT) {\n this.handleTimeout_();\n }\n if (error.code === REQUEST_ERRORS.ABORTED) {\n this.mediaRequestsAborted += 1;\n } else {\n this.mediaRequestsErrored += 1;\n }\n this.stopForError(error);\n return;\n }\n const segmentInfo = this.pendingSegment_; // although the VTT segment loader bandwidth isn't really used, it's good to\n // maintain functionality between segment loaders\n\n this.saveBandwidthRelatedStats_(segmentInfo.duration, simpleSegment.stats); // if this request included a segment key, save that data in the cache\n\n if (simpleSegment.key) {\n this.segmentKey(simpleSegment.key, true);\n }\n this.state = 'APPENDING'; // used for tests\n\n this.trigger('appending');\n const segment = segmentInfo.segment;\n if (segment.map) {\n segment.map.bytes = simpleSegment.map.bytes;\n }\n segmentInfo.bytes = simpleSegment.bytes; // Make sure that vttjs has loaded, otherwise, load it and wait till it finished loading\n\n if (typeof window$1.WebVTT !== 'function' && typeof this.loadVttJs === 'function') {\n this.state = 'WAITING_ON_VTTJS'; // should be fine to call multiple times\n // script will be loaded once but multiple listeners will be added to the queue, which is expected.\n\n this.loadVttJs().then(() => this.segmentRequestFinished_(error, simpleSegment, result), () => this.stopForError({\n message: 'Error loading vtt.js'\n }));\n return;\n }\n segment.requested = true;\n try {\n this.parseVTTCues_(segmentInfo);\n } catch (e) {\n this.stopForError({\n message: e.message,\n metadata: {\n errorType: videojs.Error.StreamingVttParserError,\n error: e\n }\n });\n return;\n }\n this.updateTimeMapping_(segmentInfo, this.syncController_.timelines[segmentInfo.timeline], this.playlist_);\n if (segmentInfo.cues.length) {\n segmentInfo.timingInfo = {\n start: segmentInfo.cues[0].startTime,\n end: segmentInfo.cues[segmentInfo.cues.length - 1].endTime\n };\n } else {\n segmentInfo.timingInfo = {\n start: segmentInfo.startOfSegment,\n end: segmentInfo.startOfSegment + segmentInfo.duration\n };\n }\n if (segmentInfo.isSyncRequest) {\n this.trigger('syncinfoupdate');\n this.pendingSegment_ = null;\n this.state = 'READY';\n return;\n }\n segmentInfo.byteLength = segmentInfo.bytes.byteLength;\n this.mediaSecondsLoaded += segment.duration; // Create VTTCue instances for each cue in the new segment and add them to\n // the subtitle track\n\n segmentInfo.cues.forEach(cue => {\n this.subtitlesTrack_.addCue(this.featuresNativeTextTracks_ ? new window$1.VTTCue(cue.startTime, cue.endTime, cue.text) : cue);\n }); // Remove any duplicate cues from the subtitle track. The WebVTT spec allows\n // cues to have identical time-intervals, but if the text is also identical\n // we can safely assume it is a duplicate that can be removed (ex. 
when a cue\n // \"overlaps\" VTT segments)\n\n removeDuplicateCuesFromTrack(this.subtitlesTrack_);\n this.handleAppendsDone_();\n }\n handleData_() {// noop as we shouldn't be getting video/audio data captions\n // that we do not support here.\n }\n updateTimingInfoEnd_() {// noop\n }\n /**\n * Uses the WebVTT parser to parse the segment response\n *\n * @throws NoVttJsError\n *\n * @param {Object} segmentInfo\n * a segment info object that describes the current segment\n * @private\n */\n\n parseVTTCues_(segmentInfo) {\n let decoder;\n let decodeBytesToString = false;\n if (typeof window$1.WebVTT !== 'function') {\n // caller is responsible for exception handling.\n throw new NoVttJsError();\n }\n if (typeof window$1.TextDecoder === 'function') {\n decoder = new window$1.TextDecoder('utf8');\n } else {\n decoder = window$1.WebVTT.StringDecoder();\n decodeBytesToString = true;\n }\n const parser = new window$1.WebVTT.Parser(window$1, window$1.vttjs, decoder);\n segmentInfo.cues = [];\n segmentInfo.timestampmap = {\n MPEGTS: 0,\n LOCAL: 0\n };\n parser.oncue = segmentInfo.cues.push.bind(segmentInfo.cues);\n parser.ontimestampmap = map => {\n segmentInfo.timestampmap = map;\n };\n parser.onparsingerror = error => {\n videojs.log.warn('Error encountered when parsing cues: ' + error.message);\n };\n if (segmentInfo.segment.map) {\n let mapData = segmentInfo.segment.map.bytes;\n if (decodeBytesToString) {\n mapData = uint8ToUtf8(mapData);\n }\n parser.parse(mapData);\n }\n let segmentData = segmentInfo.bytes;\n if (decodeBytesToString) {\n segmentData = uint8ToUtf8(segmentData);\n }\n parser.parse(segmentData);\n parser.flush();\n }\n /**\n * Updates the start and end times of any cues parsed by the WebVTT parser using\n * the information parsed from the X-TIMESTAMP-MAP header and a TS to media time mapping\n * from the SyncController\n *\n * @param {Object} segmentInfo\n * a segment info object that describes the current segment\n * @param {Object} mappingObj\n * object containing a mapping from TS to media time\n * @param {Object} playlist\n * the playlist object containing the segment\n * @private\n */\n\n updateTimeMapping_(segmentInfo, mappingObj, playlist) {\n const segment = segmentInfo.segment;\n if (!mappingObj) {\n // If the sync controller does not have a mapping of TS to Media Time for the\n // timeline, then we don't have enough information to update the cue\n // start/end times\n return;\n }\n if (!segmentInfo.cues.length) {\n // If there are no cues, we also do not have enough information to figure out\n // segment timing. Mark that the segment contains no cues so we don't re-request\n // an empty segment.\n segment.empty = true;\n return;\n }\n const {\n MPEGTS,\n LOCAL\n } = segmentInfo.timestampmap;\n /**\n * From the spec:\n * The MPEGTS media timestamp MUST use a 90KHz timescale,\n * even when non-WebVTT Media Segments use a different timescale.\n */\n\n const mpegTsInSeconds = MPEGTS / ONE_SECOND_IN_TS;\n const diff = mpegTsInSeconds - LOCAL + mappingObj.mapping;\n segmentInfo.cues.forEach(cue => {\n const duration = cue.endTime - cue.startTime;\n const startTime = MPEGTS === 0 ? 
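// Minimal driver for the vtt.js streaming parser used by parseVTTCues_ above
// (a sketch assuming vtt.js is already loaded on window, as the loader
// guarantees before parsing): cues arrive through parser.oncue and the
// X-TIMESTAMP-MAP header through parser.ontimestampmap.
function parseCues(vttText) {
  const parser = new window.WebVTT.Parser(window, window.vttjs, window.WebVTT.StringDecoder());
  const cues = [];
  let timestampmap = { MPEGTS: 0, LOCAL: 0 };
  parser.oncue = cue => cues.push(cue);
  parser.ontimestampmap = map => {
    timestampmap = map;
  };
  parser.onparsingerror = error => console.warn('VTT parsing error:', error.message);
  parser.parse(vttText);
  parser.flush();
  return { cues, timestampmap };
}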
cue.startTime + diff : this.handleRollover_(cue.startTime + diff, mappingObj.time);\n cue.startTime = Math.max(startTime, 0);\n cue.endTime = Math.max(startTime + duration, 0);\n });\n if (!playlist.syncInfo) {\n const firstStart = segmentInfo.cues[0].startTime;\n const lastStart = segmentInfo.cues[segmentInfo.cues.length - 1].startTime;\n playlist.syncInfo = {\n mediaSequence: playlist.mediaSequence + segmentInfo.mediaIndex,\n time: Math.min(firstStart, lastStart - segment.duration)\n };\n }\n }\n /**\n * MPEG-TS PES timestamps are limited to 2^33.\n * Once they reach 2^33, they roll over to 0.\n * mux.js handles PES timestamp rollover for the following scenarios:\n * [forward rollover(right)] ->\n * PES timestamps monotonically increase, and once they reach 2^33, they roll over to 0\n * [backward rollover(left)] -->\n * we seek back to position before rollover.\n *\n * According to the HLS SPEC:\n * When synchronizing WebVTT with PES timestamps, clients SHOULD account\n * for cases where the 33-bit PES timestamps have wrapped and the WebVTT\n * cue times have not. When the PES timestamp wraps, the WebVTT Segment\n * SHOULD have a X-TIMESTAMP-MAP header that maps the current WebVTT\n * time to the new (low valued) PES timestamp.\n *\n * So we want to handle rollover here and align VTT Cue start/end time to the player's time.\n */\n\n handleRollover_(value, reference) {\n if (reference === null) {\n return value;\n }\n let valueIn90khz = value * ONE_SECOND_IN_TS;\n const referenceIn90khz = reference * ONE_SECOND_IN_TS;\n let offset;\n if (referenceIn90khz < valueIn90khz) {\n // - 2^33\n offset = -8589934592;\n } else {\n // + 2^33\n offset = 8589934592;\n } // distance(value - reference) > 2^32\n\n while (Math.abs(valueIn90khz - referenceIn90khz) > 4294967296) {\n valueIn90khz += offset;\n }\n return valueIn90khz / ONE_SECOND_IN_TS;\n }\n}\n\n/**\n * @file ad-cue-tags.js\n */\n/**\n * Searches for an ad cue that overlaps with the given mediaTime\n *\n * @param {Object} track\n * the track to find the cue for\n *\n * @param {number} mediaTime\n * the time to find the cue at\n *\n * @return {Object|null}\n * the found cue or null\n */\n\nconst findAdCue = function (track, mediaTime) {\n const cues = track.cues;\n for (let i = 0; i < cues.length; i++) {\n const cue = cues[i];\n if (mediaTime >= cue.adStartTime && mediaTime <= cue.adEndTime) {\n return cue;\n }\n }\n return null;\n};\nconst updateAdCues = function (media, track, offset = 0) {\n if (!media.segments) {\n return;\n }\n let mediaTime = offset;\n let cue;\n for (let i = 0; i < media.segments.length; i++) {\n const segment = media.segments[i];\n if (!cue) {\n // Since the cues will span for at least the segment duration, adding a fudge\n // factor of half segment duration will prevent duplicate cues from being\n // created when timing info is not exact (e.g. 
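// Worked example for handleRollover_ above: 2^33 ticks at 90kHz is
// 8589934592 / 90000 ~= 95443.7 seconds (~26.5 hours). Take value = 10s and
// reference = 95440s: in ticks, |900000 - 8589600000| exceeds 2^32, and since
// the reference is larger the offset is +2^33, giving
// (900000 + 8589934592) / 90000 ~= 95453.7s -- now within 2^32 ticks of the
// reference, so the loop stops and the cue lands near the playhead.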
cue start time initialized\n // at 10.006677, but next call mediaTime is 10.003332 )\n cue = findAdCue(track, mediaTime + segment.duration / 2);\n }\n if (cue) {\n if ('cueIn' in segment) {\n // Found a CUE-IN so end the cue\n cue.endTime = mediaTime;\n cue.adEndTime = mediaTime;\n mediaTime += segment.duration;\n cue = null;\n continue;\n }\n if (mediaTime < cue.endTime) {\n // Already processed this mediaTime for this cue\n mediaTime += segment.duration;\n continue;\n } // otherwise extend cue until a CUE-IN is found\n\n cue.endTime += segment.duration;\n } else {\n if ('cueOut' in segment) {\n cue = new window$1.VTTCue(mediaTime, mediaTime + segment.duration, segment.cueOut);\n cue.adStartTime = mediaTime; // Assumes tag format to be\n // #EXT-X-CUE-OUT:30\n\n cue.adEndTime = mediaTime + parseFloat(segment.cueOut);\n track.addCue(cue);\n }\n if ('cueOutCont' in segment) {\n // Entered into the middle of an ad cue\n // Assumes tag format to be\n // #EXT-X-CUE-OUT-CONT:10/30\n const [adOffset, adTotal] = segment.cueOutCont.split('/').map(parseFloat);\n cue = new window$1.VTTCue(mediaTime, mediaTime + segment.duration, '');\n cue.adStartTime = mediaTime - adOffset;\n cue.adEndTime = cue.adStartTime + adTotal;\n track.addCue(cue);\n }\n }\n mediaTime += segment.duration;\n }\n};\nclass SyncInfo {\n /**\n * @param {number} start - media sequence start\n * @param {number} end - media sequence end\n * @param {number} segmentIndex - index for associated segment\n * @param {number|null} [partIndex] - index for associated part\n * @param {boolean} [appended] - appended indicator\n *\n */\n constructor({\n start,\n end,\n segmentIndex,\n partIndex = null,\n appended = false\n }) {\n this.start_ = start;\n this.end_ = end;\n this.segmentIndex_ = segmentIndex;\n this.partIndex_ = partIndex;\n this.appended_ = appended;\n }\n isInRange(targetTime) {\n return targetTime >= this.start && targetTime < this.end;\n }\n markAppended() {\n this.appended_ = true;\n }\n resetAppendedStatus() {\n this.appended_ = false;\n }\n get isAppended() {\n return this.appended_;\n }\n get start() {\n return this.start_;\n }\n get end() {\n return this.end_;\n }\n get segmentIndex() {\n return this.segmentIndex_;\n }\n get partIndex() {\n return this.partIndex_;\n }\n}\nclass SyncInfoData {\n /**\n *\n * @param {SyncInfo} segmentSyncInfo - sync info for a given segment\n * @param {Array} [partsSyncInfo] - sync infos for a list of parts for a given segment\n */\n constructor(segmentSyncInfo, partsSyncInfo = []) {\n this.segmentSyncInfo_ = segmentSyncInfo;\n this.partsSyncInfo_ = partsSyncInfo;\n }\n get segmentSyncInfo() {\n return this.segmentSyncInfo_;\n }\n get partsSyncInfo() {\n return this.partsSyncInfo_;\n }\n get hasPartsSyncInfo() {\n return this.partsSyncInfo_.length > 0;\n }\n resetAppendStatus() {\n this.segmentSyncInfo_.resetAppendedStatus();\n this.partsSyncInfo_.forEach(partSyncInfo => partSyncInfo.resetAppendedStatus());\n }\n}\nclass MediaSequenceSync {\n constructor() {\n /**\n * @type {Map}\n * @protected\n */\n this.storage_ = new Map();\n this.diagnostics_ = '';\n this.isReliable_ = false;\n this.start_ = -Infinity;\n this.end_ = Infinity;\n }\n get start() {\n return this.start_;\n }\n get end() {\n return this.end_;\n }\n get diagnostics() {\n return this.diagnostics_;\n }\n get isReliable() {\n return this.isReliable_;\n }\n resetAppendedStatus() {\n this.storage_.forEach(syncInfoData => syncInfoData.resetAppendStatus());\n }\n /**\n * update sync storage\n *\n * @param {Object} playlist\n * 
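// Worked example for the cueOutCont branch above: '#EXT-X-CUE-OUT-CONT:10/30'
// means playback joined 10s into a 30s ad break, so for a hypothetical
// mediaTime of 100 the cue is backdated and extended to cover the whole ad.
const [adOffset, adTotal] = '10/30'.split('/').map(parseFloat); // [10, 30]
const mediaTime = 100;
const adStartTime = mediaTime - adOffset; // 90
const adEndTime = adStartTime + adTotal; // 120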
@param {number} currentTime\n *\n * @return {void}\n */\n\n update(playlist, currentTime) {\n const {\n mediaSequence,\n segments\n } = playlist;\n this.isReliable_ = this.isReliablePlaylist_(mediaSequence, segments);\n if (!this.isReliable_) {\n return;\n }\n return this.updateStorage_(segments, mediaSequence, this.calculateBaseTime_(mediaSequence, currentTime));\n }\n /**\n * @param {number} targetTime\n * @return {SyncInfo|null}\n */\n\n getSyncInfoForTime(targetTime) {\n for (const {\n segmentSyncInfo,\n partsSyncInfo\n } of this.storage_.values()) {\n // Normal segment flow:\n if (!partsSyncInfo.length) {\n if (segmentSyncInfo.isInRange(targetTime)) {\n return segmentSyncInfo;\n }\n } else {\n // Low latency flow:\n for (const partSyncInfo of partsSyncInfo) {\n if (partSyncInfo.isInRange(targetTime)) {\n return partSyncInfo;\n }\n }\n }\n }\n return null;\n }\n getSyncInfoForMediaSequence(mediaSequence) {\n return this.storage_.get(mediaSequence);\n }\n updateStorage_(segments, startingMediaSequence, startingTime) {\n const newStorage = new Map();\n let newDiagnostics = '\\n';\n let currentStart = startingTime;\n let currentMediaSequence = startingMediaSequence;\n this.start_ = currentStart;\n segments.forEach((segment, segmentIndex) => {\n const prevSyncInfoData = this.storage_.get(currentMediaSequence);\n const segmentStart = currentStart;\n const segmentEnd = segmentStart + segment.duration;\n const segmentIsAppended = Boolean(prevSyncInfoData && prevSyncInfoData.segmentSyncInfo && prevSyncInfoData.segmentSyncInfo.isAppended);\n const segmentSyncInfo = new SyncInfo({\n start: segmentStart,\n end: segmentEnd,\n appended: segmentIsAppended,\n segmentIndex\n });\n segment.syncInfo = segmentSyncInfo;\n let currentPartStart = currentStart;\n const partsSyncInfo = (segment.parts || []).map((part, partIndex) => {\n const partStart = currentPartStart;\n const partEnd = currentPartStart + part.duration;\n const partIsAppended = Boolean(prevSyncInfoData && prevSyncInfoData.partsSyncInfo && prevSyncInfoData.partsSyncInfo[partIndex] && prevSyncInfoData.partsSyncInfo[partIndex].isAppended);\n const partSyncInfo = new SyncInfo({\n start: partStart,\n end: partEnd,\n appended: partIsAppended,\n segmentIndex,\n partIndex\n });\n currentPartStart = partEnd;\n newDiagnostics += `Media Sequence: ${currentMediaSequence}.${partIndex} | Range: ${partStart} --> ${partEnd} | Appended: ${partIsAppended}\\n`;\n part.syncInfo = partSyncInfo;\n return partSyncInfo;\n });\n newStorage.set(currentMediaSequence, new SyncInfoData(segmentSyncInfo, partsSyncInfo));\n newDiagnostics += `${compactSegmentUrlDescription(segment.resolvedUri)} | Media Sequence: ${currentMediaSequence} | Range: ${segmentStart} --> ${segmentEnd} | Appended: ${segmentIsAppended}\\n`;\n currentMediaSequence++;\n currentStart = segmentEnd;\n });\n this.end_ = currentStart;\n this.storage_ = newStorage;\n this.diagnostics_ = newDiagnostics;\n }\n calculateBaseTime_(mediaSequence, fallback) {\n if (!this.storage_.size) {\n // Initial setup flow.\n return 0;\n }\n if (this.storage_.has(mediaSequence)) {\n // Normal flow.\n return this.storage_.get(mediaSequence).segmentSyncInfo.start;\n } // Fallback flow.\n // There is a gap between last recorded playlist and a new one received.\n\n return fallback;\n }\n isReliablePlaylist_(mediaSequence, segments) {\n return mediaSequence !== undefined && mediaSequence !== null && Array.isArray(segments) && segments.length;\n }\n}\nclass DependantMediaSequenceSync extends MediaSequenceSync {\n 
constructor(parent) {\n super();\n this.parent_ = parent;\n }\n calculateBaseTime_(mediaSequence, fallback) {\n if (!this.storage_.size) {\n const info = this.parent_.getSyncInfoForMediaSequence(mediaSequence);\n if (info) {\n return info.segmentSyncInfo.start;\n }\n return 0;\n }\n return super.calculateBaseTime_(mediaSequence, fallback);\n }\n}\n\n/**\n * @file sync-controller.js\n */\n// synchronize expired playlist segments.\n// the max media sequence diff is 48 hours of live stream\n// content with two second segments. Anything larger than that\n// will likely be invalid.\n\nconst MAX_MEDIA_SEQUENCE_DIFF_FOR_SYNC = 86400;\nconst syncPointStrategies = [\n// Stategy \"VOD\": Handle the VOD-case where the sync-point is *always*\n// the equivalence display-time 0 === segment-index 0\n{\n name: 'VOD',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n if (duration !== Infinity) {\n const syncPoint = {\n time: 0,\n segmentIndex: 0,\n partIndex: null\n };\n return syncPoint;\n }\n return null;\n }\n}, {\n name: 'MediaSequence',\n /**\n * run media sequence strategy\n *\n * @param {SyncController} syncController\n * @param {Object} playlist\n * @param {number} duration\n * @param {number} currentTimeline\n * @param {number} currentTime\n * @param {string} type\n */\n run: (syncController, playlist, duration, currentTimeline, currentTime, type) => {\n const mediaSequenceSync = syncController.getMediaSequenceSync(type);\n if (!mediaSequenceSync) {\n return null;\n }\n if (!mediaSequenceSync.isReliable) {\n return null;\n }\n const syncInfo = mediaSequenceSync.getSyncInfoForTime(currentTime);\n if (!syncInfo) {\n return null;\n }\n return {\n time: syncInfo.start,\n partIndex: syncInfo.partIndex,\n segmentIndex: syncInfo.segmentIndex\n };\n }\n},\n// Stategy \"ProgramDateTime\": We have a program-date-time tag in this playlist\n{\n name: 'ProgramDateTime',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n if (!Object.keys(syncController.timelineToDatetimeMappings).length) {\n return null;\n }\n let syncPoint = null;\n let lastDistance = null;\n const partsAndSegments = getPartsAndSegments(playlist);\n currentTime = currentTime || 0;\n for (let i = 0; i < partsAndSegments.length; i++) {\n // start from the end and loop backwards for live\n // or start from the front and loop forwards for non-live\n const index = playlist.endList || currentTime === 0 ? 
i : partsAndSegments.length - (i + 1);\n const partAndSegment = partsAndSegments[index];\n const segment = partAndSegment.segment;\n const datetimeMapping = syncController.timelineToDatetimeMappings[segment.timeline];\n if (!datetimeMapping || !segment.dateTimeObject) {\n continue;\n }\n const segmentTime = segment.dateTimeObject.getTime() / 1000;\n let start = segmentTime + datetimeMapping; // take part duration into account.\n\n if (segment.parts && typeof partAndSegment.partIndex === 'number') {\n for (let z = 0; z < partAndSegment.partIndex; z++) {\n start += segment.parts[z].duration;\n }\n }\n const distance = Math.abs(currentTime - start); // Once the distance begins to increase, or if distance is 0, we have passed\n // currentTime and can stop looking for better candidates\n\n if (lastDistance !== null && (distance === 0 || lastDistance < distance)) {\n break;\n }\n lastDistance = distance;\n syncPoint = {\n time: start,\n segmentIndex: partAndSegment.segmentIndex,\n partIndex: partAndSegment.partIndex\n };\n }\n return syncPoint;\n }\n},\n// Stategy \"Segment\": We have a known time mapping for a timeline and a\n// segment in the current timeline with timing data\n{\n name: 'Segment',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n let syncPoint = null;\n let lastDistance = null;\n currentTime = currentTime || 0;\n const partsAndSegments = getPartsAndSegments(playlist);\n for (let i = 0; i < partsAndSegments.length; i++) {\n // start from the end and loop backwards for live\n // or start from the front and loop forwards for non-live\n const index = playlist.endList || currentTime === 0 ? i : partsAndSegments.length - (i + 1);\n const partAndSegment = partsAndSegments[index];\n const segment = partAndSegment.segment;\n const start = partAndSegment.part && partAndSegment.part.start || segment && segment.start;\n if (segment.timeline === currentTimeline && typeof start !== 'undefined') {\n const distance = Math.abs(currentTime - start); // Once the distance begins to increase, we have passed\n // currentTime and can stop looking for better candidates\n\n if (lastDistance !== null && lastDistance < distance) {\n break;\n }\n if (!syncPoint || lastDistance === null || lastDistance >= distance) {\n lastDistance = distance;\n syncPoint = {\n time: start,\n segmentIndex: partAndSegment.segmentIndex,\n partIndex: partAndSegment.partIndex\n };\n }\n }\n }\n return syncPoint;\n }\n},\n// Stategy \"Discontinuity\": We have a discontinuity with a known\n// display-time\n{\n name: 'Discontinuity',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n let syncPoint = null;\n currentTime = currentTime || 0;\n if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {\n let lastDistance = null;\n for (let i = 0; i < playlist.discontinuityStarts.length; i++) {\n const segmentIndex = playlist.discontinuityStarts[i];\n const discontinuity = playlist.discontinuitySequence + i + 1;\n const discontinuitySync = syncController.discontinuities[discontinuity];\n if (discontinuitySync) {\n const distance = Math.abs(currentTime - discontinuitySync.time); // Once the distance begins to increase, we have passed\n // currentTime and can stop looking for better candidates\n\n if (lastDistance !== null && lastDistance < distance) {\n break;\n }\n if (!syncPoint || lastDistance === null || lastDistance >= distance) {\n lastDistance = distance;\n syncPoint = {\n time: discontinuitySync.time,\n segmentIndex,\n partIndex: null\n };\n }\n }\n 
}\n }\n return syncPoint;\n }\n},\n// Stategy \"Playlist\": We have a playlist with a known mapping of\n// segment index to display time\n{\n name: 'Playlist',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n if (playlist.syncInfo) {\n const syncPoint = {\n time: playlist.syncInfo.time,\n segmentIndex: playlist.syncInfo.mediaSequence - playlist.mediaSequence,\n partIndex: null\n };\n return syncPoint;\n }\n return null;\n }\n}];\nclass SyncController extends videojs.EventTarget {\n constructor(options = {}) {\n super(); // ...for synching across variants\n\n this.timelines = [];\n this.discontinuities = [];\n this.timelineToDatetimeMappings = {}; // TODO: this map should be only available for HLS. Since only HLS has MediaSequence.\n // For some reason this map helps with syncing between quality switch for MPEG-DASH as well.\n // Moreover if we disable this map for MPEG-DASH - quality switch will be broken.\n // MPEG-DASH should have its own separate sync strategy\n\n const main = new MediaSequenceSync();\n const audio = new DependantMediaSequenceSync(main);\n const vtt = new DependantMediaSequenceSync(main);\n this.mediaSequenceStorage_ = {\n main,\n audio,\n vtt\n };\n this.logger_ = logger('SyncController');\n }\n /**\n *\n * @param {string} loaderType\n * @return {MediaSequenceSync|null}\n */\n\n getMediaSequenceSync(loaderType) {\n return this.mediaSequenceStorage_[loaderType] || null;\n }\n /**\n * Find a sync-point for the playlist specified\n *\n * A sync-point is defined as a known mapping from display-time to\n * a segment-index in the current playlist.\n *\n * @param {Playlist} playlist\n * The playlist that needs a sync-point\n * @param {number} duration\n * Duration of the MediaSource (Infinite if playing a live source)\n * @param {number} currentTimeline\n * The last timeline from which a segment was loaded\n * @param {number} currentTime\n * Current player's time\n * @param {string} type\n * Segment loader type\n * @return {Object}\n * A sync-point object\n */\n\n getSyncPoint(playlist, duration, currentTimeline, currentTime, type) {\n // Always use VOD sync point for VOD\n if (duration !== Infinity) {\n const vodSyncPointStrategy = syncPointStrategies.find(({\n name\n }) => name === 'VOD');\n return vodSyncPointStrategy.run(this, playlist, duration);\n }\n const syncPoints = this.runStrategies_(playlist, duration, currentTimeline, currentTime, type);\n if (!syncPoints.length) {\n // Signal that we need to attempt to get a sync-point manually\n // by fetching a segment in the playlist and constructing\n // a sync-point from that information\n return null;\n } // If we have exact match just return it instead of finding the nearest distance\n\n for (const syncPointInfo of syncPoints) {\n const {\n syncPoint,\n strategy\n } = syncPointInfo;\n const {\n segmentIndex,\n time\n } = syncPoint;\n if (segmentIndex < 0) {\n continue;\n }\n const selectedSegment = playlist.segments[segmentIndex];\n const start = time;\n const end = start + selectedSegment.duration;\n this.logger_(`Strategy: ${strategy}. Current time: ${currentTime}. selected segment: ${segmentIndex}. 
Time: [${start} -> ${end}]}`);\n if (currentTime >= start && currentTime < end) {\n this.logger_('Found sync point with exact match: ', syncPoint);\n return syncPoint;\n }\n } // Now find the sync-point that is closest to the currentTime because\n // that should result in the most accurate guess about which segment\n // to fetch\n\n return this.selectSyncPoint_(syncPoints, {\n key: 'time',\n value: currentTime\n });\n }\n /**\n * Calculate the amount of time that has expired off the playlist during playback\n *\n * @param {Playlist} playlist\n * Playlist object to calculate expired from\n * @param {number} duration\n * Duration of the MediaSource (Infinity if playling a live source)\n * @return {number|null}\n * The amount of time that has expired off the playlist during playback. Null\n * if no sync-points for the playlist can be found.\n */\n\n getExpiredTime(playlist, duration) {\n if (!playlist || !playlist.segments) {\n return null;\n }\n const syncPoints = this.runStrategies_(playlist, duration, playlist.discontinuitySequence, 0); // Without sync-points, there is not enough information to determine the expired time\n\n if (!syncPoints.length) {\n return null;\n }\n const syncPoint = this.selectSyncPoint_(syncPoints, {\n key: 'segmentIndex',\n value: 0\n }); // If the sync-point is beyond the start of the playlist, we want to subtract the\n // duration from index 0 to syncPoint.segmentIndex instead of adding.\n\n if (syncPoint.segmentIndex > 0) {\n syncPoint.time *= -1;\n }\n return Math.abs(syncPoint.time + sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: playlist.segments,\n startIndex: syncPoint.segmentIndex,\n endIndex: 0\n }));\n }\n /**\n * Runs each sync-point strategy and returns a list of sync-points returned by the\n * strategies\n *\n * @private\n * @param {Playlist} playlist\n * The playlist that needs a sync-point\n * @param {number} duration\n * Duration of the MediaSource (Infinity if playing a live source)\n * @param {number} currentTimeline\n * The last timeline from which a segment was loaded\n * @param {number} currentTime\n * Current player's time\n * @param {string} type\n * Segment loader type\n * @return {Array}\n * A list of sync-point objects\n */\n\n runStrategies_(playlist, duration, currentTimeline, currentTime, type) {\n const syncPoints = []; // Try to find a sync-point in by utilizing various strategies...\n\n for (let i = 0; i < syncPointStrategies.length; i++) {\n const strategy = syncPointStrategies[i];\n const syncPoint = strategy.run(this, playlist, duration, currentTimeline, currentTime, type);\n if (syncPoint) {\n syncPoint.strategy = strategy.name;\n syncPoints.push({\n strategy: strategy.name,\n syncPoint\n });\n }\n }\n return syncPoints;\n }\n /**\n * Selects the sync-point nearest the specified target\n *\n * @private\n * @param {Array} syncPoints\n * List of sync-points to select from\n * @param {Object} target\n * Object specifying the property and value we are targeting\n * @param {string} target.key\n * Specifies the property to target. 
Must be either 'time' or 'segmentIndex'\n * @param {number} target.value\n * The value to target for the specified key.\n * @return {Object}\n * The sync-point nearest the target\n */\n\n selectSyncPoint_(syncPoints, target) {\n let bestSyncPoint = syncPoints[0].syncPoint;\n let bestDistance = Math.abs(syncPoints[0].syncPoint[target.key] - target.value);\n let bestStrategy = syncPoints[0].strategy;\n for (let i = 1; i < syncPoints.length; i++) {\n const newDistance = Math.abs(syncPoints[i].syncPoint[target.key] - target.value);\n if (newDistance < bestDistance) {\n bestDistance = newDistance;\n bestSyncPoint = syncPoints[i].syncPoint;\n bestStrategy = syncPoints[i].strategy;\n }\n }\n this.logger_(`syncPoint for [${target.key}: ${target.value}] chosen with strategy` + ` [${bestStrategy}]: [time:${bestSyncPoint.time},` + ` segmentIndex:${bestSyncPoint.segmentIndex}` + (typeof bestSyncPoint.partIndex === 'number' ? `,partIndex:${bestSyncPoint.partIndex}` : '') + ']');\n return bestSyncPoint;\n }\n /**\n * Save any meta-data present on the segments when segments leave\n * the live window to the playlist to allow for synchronization at the\n * playlist level later.\n *\n * @param {Playlist} oldPlaylist - The previous active playlist\n * @param {Playlist} newPlaylist - The updated and most current playlist\n */\n\n saveExpiredSegmentInfo(oldPlaylist, newPlaylist) {\n const mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence; // Ignore large media sequence gaps\n\n if (mediaSequenceDiff > MAX_MEDIA_SEQUENCE_DIFF_FOR_SYNC) {\n videojs.log.warn(`Not saving expired segment info. Media sequence gap ${mediaSequenceDiff} is too large.`);\n return;\n } // When a segment expires from the playlist and it has a start time\n // save that information as a possible sync-point reference in future\n\n for (let i = mediaSequenceDiff - 1; i >= 0; i--) {\n const lastRemovedSegment = oldPlaylist.segments[i];\n if (lastRemovedSegment && typeof lastRemovedSegment.start !== 'undefined') {\n newPlaylist.syncInfo = {\n mediaSequence: oldPlaylist.mediaSequence + i,\n time: lastRemovedSegment.start\n };\n this.logger_(`playlist refresh sync: [time:${newPlaylist.syncInfo.time},` + ` mediaSequence: ${newPlaylist.syncInfo.mediaSequence}]`);\n this.trigger('syncinfoupdate');\n break;\n }\n }\n }\n /**\n * Save the mapping from playlist's ProgramDateTime to display. This should only happen\n * before segments start to load.\n *\n * @param {Playlist} playlist - The currently active playlist\n */\n\n setDateTimeMappingForStart(playlist) {\n // It's possible for the playlist to be updated before playback starts, meaning time\n // zero is not yet set. 
If, during these playlist refreshes, a discontinuity is\n // crossed, then the old time zero mapping (for the prior timeline) would be retained\n // unless the mappings are cleared.\n this.timelineToDatetimeMappings = {};\n if (playlist.segments && playlist.segments.length && playlist.segments[0].dateTimeObject) {\n const firstSegment = playlist.segments[0];\n const playlistTimestamp = firstSegment.dateTimeObject.getTime() / 1000;\n this.timelineToDatetimeMappings[firstSegment.timeline] = -playlistTimestamp;\n }\n }\n /**\n * Calculates and saves timeline mappings, playlist sync info, and segment timing values\n * based on the latest timing information.\n *\n * @param {Object} options\n * Options object\n * @param {SegmentInfo} options.segmentInfo\n * The current active request information\n * @param {boolean} options.shouldSaveTimelineMapping\n * If there's a timeline change, determines if the timeline mapping should be\n * saved for timeline mapping and program date time mappings.\n */\n\n saveSegmentTimingInfo({\n segmentInfo,\n shouldSaveTimelineMapping\n }) {\n const didCalculateSegmentTimeMapping = this.calculateSegmentTimeMapping_(segmentInfo, segmentInfo.timingInfo, shouldSaveTimelineMapping);\n const segment = segmentInfo.segment;\n if (didCalculateSegmentTimeMapping) {\n this.saveDiscontinuitySyncInfo_(segmentInfo); // If the playlist does not have sync information yet, record that information\n // now with segment timing information\n\n if (!segmentInfo.playlist.syncInfo) {\n segmentInfo.playlist.syncInfo = {\n mediaSequence: segmentInfo.playlist.mediaSequence + segmentInfo.mediaIndex,\n time: segment.start\n };\n }\n }\n const dateTime = segment.dateTimeObject;\n if (segment.discontinuity && shouldSaveTimelineMapping && dateTime) {\n this.timelineToDatetimeMappings[segment.timeline] = -(dateTime.getTime() / 1000);\n }\n }\n timestampOffsetForTimeline(timeline) {\n if (typeof this.timelines[timeline] === 'undefined') {\n return null;\n }\n return this.timelines[timeline].time;\n }\n mappingForTimeline(timeline) {\n if (typeof this.timelines[timeline] === 'undefined') {\n return null;\n }\n return this.timelines[timeline].mapping;\n }\n /**\n * Use the \"media time\" for a segment to generate a mapping to \"display time\" and\n * save that display time to the segment.\n *\n * @private\n * @param {SegmentInfo} segmentInfo\n * The current active request information\n * @param {Object} timingInfo\n * The start and end time of the current segment in \"media time\"\n * @param {boolean} shouldSaveTimelineMapping\n * If there's a timeline change, determines if the timeline mapping should be\n * saved in timelines.\n * @return {boolean}\n * Returns false if segment time mapping could not be calculated\n */\n\n calculateSegmentTimeMapping_(segmentInfo, timingInfo, shouldSaveTimelineMapping) {\n // TODO: remove side effects\n const segment = segmentInfo.segment;\n const part = segmentInfo.part;\n let mappingObj = this.timelines[segmentInfo.timeline];\n let start;\n let end;\n if (typeof segmentInfo.timestampOffset === 'number') {\n mappingObj = {\n time: segmentInfo.startOfSegment,\n mapping: segmentInfo.startOfSegment - timingInfo.start\n };\n if (shouldSaveTimelineMapping) {\n this.timelines[segmentInfo.timeline] = mappingObj;\n this.trigger('timestampoffset');\n this.logger_(`time mapping for timeline ${segmentInfo.timeline}: ` + `[time: ${mappingObj.time}] [mapping: ${mappingObj.mapping}]`);\n }\n start = segmentInfo.startOfSegment;\n end = timingInfo.end + mappingObj.mapping;\n } else 
if (mappingObj) {\n start = timingInfo.start + mappingObj.mapping;\n end = timingInfo.end + mappingObj.mapping;\n } else {\n return false;\n }\n if (part) {\n part.start = start;\n part.end = end;\n } // If we don't have a segment start yet or the start value we got\n // is less than our current segment.start value, save a new start value.\n // We have to do this because parts will have segment timing info saved\n // multiple times and we want segment start to be the earliest part start\n // value for that segment.\n\n if (!segment.start || start < segment.start) {\n segment.start = start;\n }\n segment.end = end;\n return true;\n }\n /**\n * Each time we have discontinuity in the playlist, attempt to calculate the location\n * in display of the start of the discontinuity and save that. We also save an accuracy\n * value so that we save values with the most accuracy (closest to 0.)\n *\n * @private\n * @param {SegmentInfo} segmentInfo - The current active request information\n */\n\n saveDiscontinuitySyncInfo_(segmentInfo) {\n const playlist = segmentInfo.playlist;\n const segment = segmentInfo.segment; // If the current segment is a discontinuity then we know exactly where\n // the start of the range and it's accuracy is 0 (greater accuracy values\n // mean more approximation)\n\n if (segment.discontinuity) {\n this.discontinuities[segment.timeline] = {\n time: segment.start,\n accuracy: 0\n };\n } else if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {\n // Search for future discontinuities that we can provide better timing\n // information for and save that information for sync purposes\n for (let i = 0; i < playlist.discontinuityStarts.length; i++) {\n const segmentIndex = playlist.discontinuityStarts[i];\n const discontinuity = playlist.discontinuitySequence + i + 1;\n const mediaIndexDiff = segmentIndex - segmentInfo.mediaIndex;\n const accuracy = Math.abs(mediaIndexDiff);\n if (!this.discontinuities[discontinuity] || this.discontinuities[discontinuity].accuracy > accuracy) {\n let time;\n if (mediaIndexDiff < 0) {\n time = segment.start - sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: playlist.segments,\n startIndex: segmentInfo.mediaIndex,\n endIndex: segmentIndex\n });\n } else {\n time = segment.end + sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: playlist.segments,\n startIndex: segmentInfo.mediaIndex + 1,\n endIndex: segmentIndex\n });\n }\n this.discontinuities[discontinuity] = {\n time,\n accuracy\n };\n }\n }\n }\n }\n dispose() {\n this.trigger('dispose');\n this.off();\n }\n}\n\n/**\n * The TimelineChangeController acts as a source for segment loaders to listen for and\n * keep track of latest and pending timeline changes. 
This is useful to ensure proper\n * sync, as each loader may need to make a consideration for what timeline the other\n * loader is on before making changes which could impact the other loader's media.\n *\n * @class TimelineChangeController\n * @extends videojs.EventTarget\n */\n\nclass TimelineChangeController extends videojs.EventTarget {\n constructor() {\n super();\n this.pendingTimelineChanges_ = {};\n this.lastTimelineChanges_ = {};\n }\n clearPendingTimelineChange(type) {\n this.pendingTimelineChanges_[type] = null;\n this.trigger('pendingtimelinechange');\n }\n pendingTimelineChange({\n type,\n from,\n to\n }) {\n if (typeof from === 'number' && typeof to === 'number') {\n this.pendingTimelineChanges_[type] = {\n type,\n from,\n to\n };\n this.trigger('pendingtimelinechange');\n }\n return this.pendingTimelineChanges_[type];\n }\n lastTimelineChange({\n type,\n from,\n to\n }) {\n if (typeof from === 'number' && typeof to === 'number') {\n this.lastTimelineChanges_[type] = {\n type,\n from,\n to\n };\n delete this.pendingTimelineChanges_[type];\n const metadata = {\n timelineChangeInfo: {\n from,\n to\n }\n };\n this.trigger({\n type: 'timelinechange',\n metadata\n });\n }\n return this.lastTimelineChanges_[type];\n }\n dispose() {\n this.trigger('dispose');\n this.pendingTimelineChanges_ = {};\n this.lastTimelineChanges_ = {};\n this.off();\n }\n}\n\n/* rollup-plugin-worker-factory start for worker!/home/runner/work/http-streaming/http-streaming/src/decrypter-worker.js */\nconst workerCode = transform(getWorkerString(function () {\n /**\n * @file stream.js\n */\n\n /**\n * A lightweight readable stream implemention that handles event dispatching.\n *\n * @class Stream\n */\n\n var Stream = /*#__PURE__*/function () {\n function Stream() {\n this.listeners = {};\n }\n /**\n * Add a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener the callback to be invoked when an event of\n * the specified type occurs\n */\n\n var _proto = Stream.prototype;\n _proto.on = function on(type, listener) {\n if (!this.listeners[type]) {\n this.listeners[type] = [];\n }\n this.listeners[type].push(listener);\n }\n /**\n * Remove a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener a function previously registered for this\n * type of event through `on`\n * @return {boolean} if we could turn it off or not\n */;\n _proto.off = function off(type, listener) {\n if (!this.listeners[type]) {\n return false;\n }\n var index = this.listeners[type].indexOf(listener); // TODO: which is better?\n // In Video.js we slice listener functions\n // on trigger so that it does not mess up the order\n // while we loop through.\n //\n // Here we slice on off so that the loop in trigger\n // can continue using it's old reference to loop without\n // messing up the order.\n\n this.listeners[type] = this.listeners[type].slice(0);\n this.listeners[type].splice(index, 1);\n return index > -1;\n }\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n *\n * @param {string} type the event name\n */;\n _proto.trigger = function trigger(type) {\n var callbacks = this.listeners[type];\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. 
Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n if (arguments.length === 2) {\n var length = callbacks.length;\n for (var i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n var args = Array.prototype.slice.call(arguments, 1);\n var _length = callbacks.length;\n for (var _i = 0; _i < _length; ++_i) {\n callbacks[_i].apply(this, args);\n }\n }\n }\n /**\n * Destroys the stream and cleans up.\n */;\n _proto.dispose = function dispose() {\n this.listeners = {};\n }\n /**\n * Forwards all `data` events on this stream to the destination stream. The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n *\n * @param {Stream} destination the stream that will receive all `data` events\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */;\n _proto.pipe = function pipe(destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n };\n return Stream;\n }();\n /*! @name pkcs7 @version 1.0.4 @license Apache-2.0 */\n\n /**\n * Returns the subarray of a Uint8Array without PKCS#7 padding.\n *\n * @param padded {Uint8Array} unencrypted bytes that have been padded\n * @return {Uint8Array} the unpadded bytes\n * @see http://tools.ietf.org/html/rfc5652\n */\n\n function unpad(padded) {\n return padded.subarray(0, padded.byteLength - padded[padded.byteLength - 1]);\n }\n /*! @name aes-decrypter @version 4.0.1 @license Apache-2.0 */\n\n /**\n * @file aes.js\n *\n * This file contains an adaptation of the AES decryption algorithm\n * from the Standford Javascript Cryptography Library. That work is\n * covered by the following copyright and permissions notice:\n *\n * Copyright 2009-2010 Emily Stark, Mike Hamburg, Dan Boneh.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following\n * disclaimer in the documentation and/or other materials provided\n * with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. 
IN NO EVENT SHALL OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n * The views and conclusions contained in the software and documentation\n * are those of the authors and should not be interpreted as representing\n * official policies, either expressed or implied, of the authors.\n */\n\n /**\n * Expand the S-box tables.\n *\n * @private\n */\n\n const precompute = function () {\n const tables = [[[], [], [], [], []], [[], [], [], [], []]];\n const encTable = tables[0];\n const decTable = tables[1];\n const sbox = encTable[4];\n const sboxInv = decTable[4];\n let i;\n let x;\n let xInv;\n const d = [];\n const th = [];\n let x2;\n let x4;\n let x8;\n let s;\n let tEnc;\n let tDec; // Compute double and third tables\n\n for (i = 0; i < 256; i++) {\n th[(d[i] = i << 1 ^ (i >> 7) * 283) ^ i] = i;\n }\n for (x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {\n // Compute sbox\n s = xInv ^ xInv << 1 ^ xInv << 2 ^ xInv << 3 ^ xInv << 4;\n s = s >> 8 ^ s & 255 ^ 99;\n sbox[x] = s;\n sboxInv[s] = x; // Compute MixColumns\n\n x8 = d[x4 = d[x2 = d[x]]];\n tDec = x8 * 0x1010101 ^ x4 * 0x10001 ^ x2 * 0x101 ^ x * 0x1010100;\n tEnc = d[s] * 0x101 ^ s * 0x1010100;\n for (i = 0; i < 4; i++) {\n encTable[i][x] = tEnc = tEnc << 24 ^ tEnc >>> 8;\n decTable[i][s] = tDec = tDec << 24 ^ tDec >>> 8;\n }\n } // Compactify. Considerable speedup on Firefox.\n\n for (i = 0; i < 5; i++) {\n encTable[i] = encTable[i].slice(0);\n decTable[i] = decTable[i].slice(0);\n }\n return tables;\n };\n let aesTables = null;\n /**\n * Schedule out an AES key for both encryption and decryption. This\n * is a low-level class. Use a cipher mode to do bulk encryption.\n *\n * @class AES\n * @param key {Array} The key as an array of 4, 6 or 8 words.\n */\n\n class AES {\n constructor(key) {\n /**\n * The expanded S-box and inverse S-box tables. These will be computed\n * on the client so that we don't have to send them down the wire.\n *\n * There are two tables, _tables[0] is for encryption and\n * _tables[1] is for decryption.\n *\n * The first 4 sub-tables are the expanded S-box with MixColumns. 
The\n * last (_tables[01][4]) is the S-box itself.\n *\n * @private\n */\n // if we have yet to precompute the S-box tables\n // do so now\n if (!aesTables) {\n aesTables = precompute();\n } // then make a copy of that object for use\n\n this._tables = [[aesTables[0][0].slice(), aesTables[0][1].slice(), aesTables[0][2].slice(), aesTables[0][3].slice(), aesTables[0][4].slice()], [aesTables[1][0].slice(), aesTables[1][1].slice(), aesTables[1][2].slice(), aesTables[1][3].slice(), aesTables[1][4].slice()]];\n let i;\n let j;\n let tmp;\n const sbox = this._tables[0][4];\n const decTable = this._tables[1];\n const keyLen = key.length;\n let rcon = 1;\n if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {\n throw new Error('Invalid aes key size');\n }\n const encKey = key.slice(0);\n const decKey = [];\n this._key = [encKey, decKey]; // schedule encryption keys\n\n for (i = keyLen; i < 4 * keyLen + 28; i++) {\n tmp = encKey[i - 1]; // apply sbox\n\n if (i % keyLen === 0 || keyLen === 8 && i % keyLen === 4) {\n tmp = sbox[tmp >>> 24] << 24 ^ sbox[tmp >> 16 & 255] << 16 ^ sbox[tmp >> 8 & 255] << 8 ^ sbox[tmp & 255]; // shift rows and add rcon\n\n if (i % keyLen === 0) {\n tmp = tmp << 8 ^ tmp >>> 24 ^ rcon << 24;\n rcon = rcon << 1 ^ (rcon >> 7) * 283;\n }\n }\n encKey[i] = encKey[i - keyLen] ^ tmp;\n } // schedule decryption keys\n\n for (j = 0; i; j++, i--) {\n tmp = encKey[j & 3 ? i : i - 4];\n if (i <= 4 || j < 4) {\n decKey[j] = tmp;\n } else {\n decKey[j] = decTable[0][sbox[tmp >>> 24]] ^ decTable[1][sbox[tmp >> 16 & 255]] ^ decTable[2][sbox[tmp >> 8 & 255]] ^ decTable[3][sbox[tmp & 255]];\n }\n }\n }\n /**\n * Decrypt 16 bytes, specified as four 32-bit words.\n *\n * @param {number} encrypted0 the first word to decrypt\n * @param {number} encrypted1 the second word to decrypt\n * @param {number} encrypted2 the third word to decrypt\n * @param {number} encrypted3 the fourth word to decrypt\n * @param {Int32Array} out the array to write the decrypted words\n * into\n * @param {number} offset the offset into the output array to start\n * writing results\n * @return {Array} The plaintext.\n */\n\n decrypt(encrypted0, encrypted1, encrypted2, encrypted3, out, offset) {\n const key = this._key[1]; // state variables a,b,c,d are loaded with pre-whitened data\n\n let a = encrypted0 ^ key[0];\n let b = encrypted3 ^ key[1];\n let c = encrypted2 ^ key[2];\n let d = encrypted1 ^ key[3];\n let a2;\n let b2;\n let c2; // key.length === 2 ?\n\n const nInnerRounds = key.length / 4 - 2;\n let i;\n let kIndex = 4;\n const table = this._tables[1]; // load up the tables\n\n const table0 = table[0];\n const table1 = table[1];\n const table2 = table[2];\n const table3 = table[3];\n const sbox = table[4]; // Inner rounds. 
Cribbed from OpenSSL.\n\n for (i = 0; i < nInnerRounds; i++) {\n a2 = table0[a >>> 24] ^ table1[b >> 16 & 255] ^ table2[c >> 8 & 255] ^ table3[d & 255] ^ key[kIndex];\n b2 = table0[b >>> 24] ^ table1[c >> 16 & 255] ^ table2[d >> 8 & 255] ^ table3[a & 255] ^ key[kIndex + 1];\n c2 = table0[c >>> 24] ^ table1[d >> 16 & 255] ^ table2[a >> 8 & 255] ^ table3[b & 255] ^ key[kIndex + 2];\n d = table0[d >>> 24] ^ table1[a >> 16 & 255] ^ table2[b >> 8 & 255] ^ table3[c & 255] ^ key[kIndex + 3];\n kIndex += 4;\n a = a2;\n b = b2;\n c = c2;\n } // Last round.\n\n for (i = 0; i < 4; i++) {\n out[(3 & -i) + offset] = sbox[a >>> 24] << 24 ^ sbox[b >> 16 & 255] << 16 ^ sbox[c >> 8 & 255] << 8 ^ sbox[d & 255] ^ key[kIndex++];\n a2 = a;\n a = b;\n b = c;\n c = d;\n d = a2;\n }\n }\n }\n /**\n * @file async-stream.js\n */\n\n /**\n * A wrapper around the Stream class to use setTimeout\n * and run stream \"jobs\" Asynchronously\n *\n * @class AsyncStream\n * @extends Stream\n */\n\n class AsyncStream extends Stream {\n constructor() {\n super(Stream);\n this.jobs = [];\n this.delay = 1;\n this.timeout_ = null;\n }\n /**\n * process an async job\n *\n * @private\n */\n\n processJob_() {\n this.jobs.shift()();\n if (this.jobs.length) {\n this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);\n } else {\n this.timeout_ = null;\n }\n }\n /**\n * push a job into the stream\n *\n * @param {Function} job the job to push into the stream\n */\n\n push(job) {\n this.jobs.push(job);\n if (!this.timeout_) {\n this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);\n }\n }\n }\n /**\n * @file decrypter.js\n *\n * An asynchronous implementation of AES-128 CBC decryption with\n * PKCS#7 padding.\n */\n\n /**\n * Convert network-order (big-endian) bytes into their little-endian\n * representation.\n */\n\n const ntoh = function (word) {\n return word << 24 | (word & 0xff00) << 8 | (word & 0xff0000) >> 8 | word >>> 24;\n };\n /**\n * Decrypt bytes using AES-128 with CBC and PKCS#7 padding.\n *\n * @param {Uint8Array} encrypted the encrypted bytes\n * @param {Uint32Array} key the bytes of the decryption key\n * @param {Uint32Array} initVector the initialization vector (IV) to\n * use for the first round of CBC.\n * @return {Uint8Array} the decrypted bytes\n *\n * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard\n * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Block_Chaining_.28CBC.29\n * @see https://tools.ietf.org/html/rfc2315\n */\n\n const decrypt = function (encrypted, key, initVector) {\n // word-level access to the encrypted bytes\n const encrypted32 = new Int32Array(encrypted.buffer, encrypted.byteOffset, encrypted.byteLength >> 2);\n const decipher = new AES(Array.prototype.slice.call(key)); // byte and word-level access for the decrypted output\n\n const decrypted = new Uint8Array(encrypted.byteLength);\n const decrypted32 = new Int32Array(decrypted.buffer); // temporary variables for working with the IV, encrypted, and\n // decrypted data\n\n let init0;\n let init1;\n let init2;\n let init3;\n let encrypted0;\n let encrypted1;\n let encrypted2;\n let encrypted3; // iteration variable\n\n let wordIx; // pull out the words of the IV to ensure we don't modify the\n // passed-in reference and easier access\n\n init0 = initVector[0];\n init1 = initVector[1];\n init2 = initVector[2];\n init3 = initVector[3]; // decrypt four word sequences, applying cipher-block chaining (CBC)\n // to each decrypted block\n\n for (wordIx = 0; wordIx < encrypted32.length; 
wordIx += 4) {\n // convert big-endian (network order) words into little-endian\n // (javascript order)\n encrypted0 = ntoh(encrypted32[wordIx]);\n encrypted1 = ntoh(encrypted32[wordIx + 1]);\n encrypted2 = ntoh(encrypted32[wordIx + 2]);\n encrypted3 = ntoh(encrypted32[wordIx + 3]); // decrypt the block\n\n decipher.decrypt(encrypted0, encrypted1, encrypted2, encrypted3, decrypted32, wordIx); // XOR with the IV, and restore network byte-order to obtain the\n // plaintext\n\n decrypted32[wordIx] = ntoh(decrypted32[wordIx] ^ init0);\n decrypted32[wordIx + 1] = ntoh(decrypted32[wordIx + 1] ^ init1);\n decrypted32[wordIx + 2] = ntoh(decrypted32[wordIx + 2] ^ init2);\n decrypted32[wordIx + 3] = ntoh(decrypted32[wordIx + 3] ^ init3); // setup the IV for the next round\n\n init0 = encrypted0;\n init1 = encrypted1;\n init2 = encrypted2;\n init3 = encrypted3;\n }\n return decrypted;\n };\n /**\n * The `Decrypter` class that manages decryption of AES\n * data through `AsyncStream` objects and the `decrypt`\n * function\n *\n * @param {Uint8Array} encrypted the encrypted bytes\n * @param {Uint32Array} key the bytes of the decryption key\n * @param {Uint32Array} initVector the initialization vector (IV) to\n * @param {Function} done the function to run when done\n * @class Decrypter\n */\n\n class Decrypter {\n constructor(encrypted, key, initVector, done) {\n const step = Decrypter.STEP;\n const encrypted32 = new Int32Array(encrypted.buffer);\n const decrypted = new Uint8Array(encrypted.byteLength);\n let i = 0;\n this.asyncStream_ = new AsyncStream(); // split up the encryption job and do the individual chunks asynchronously\n\n this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));\n for (i = step; i < encrypted32.length; i += step) {\n initVector = new Uint32Array([ntoh(encrypted32[i - 4]), ntoh(encrypted32[i - 3]), ntoh(encrypted32[i - 2]), ntoh(encrypted32[i - 1])]);\n this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));\n } // invoke the done() callback when everything is finished\n\n this.asyncStream_.push(function () {\n // remove pkcs#7 padding from the decrypted bytes\n done(null, unpad(decrypted));\n });\n }\n /**\n * a getter for step the maximum number of bytes to process at one time\n *\n * @return {number} the value of step 32000\n */\n\n static get STEP() {\n // 4 * 8000;\n return 32000;\n }\n /**\n * @private\n */\n\n decryptChunk_(encrypted, key, initVector, decrypted) {\n return function () {\n const bytes = decrypt(encrypted, key, initVector);\n decrypted.set(bytes, encrypted.byteOffset);\n };\n }\n }\n var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? 
self : {};\n var win;\n if (typeof window !== \"undefined\") {\n win = window;\n } else if (typeof commonjsGlobal !== \"undefined\") {\n win = commonjsGlobal;\n } else if (typeof self !== \"undefined\") {\n win = self;\n } else {\n win = {};\n }\n var window_1 = win;\n var isArrayBufferView = function isArrayBufferView(obj) {\n if (ArrayBuffer.isView === 'function') {\n return ArrayBuffer.isView(obj);\n }\n return obj && obj.buffer instanceof ArrayBuffer;\n };\n var BigInt = window_1.BigInt || Number;\n [BigInt('0x1'), BigInt('0x100'), BigInt('0x10000'), BigInt('0x1000000'), BigInt('0x100000000'), BigInt('0x10000000000'), BigInt('0x1000000000000'), BigInt('0x100000000000000'), BigInt('0x10000000000000000')];\n (function () {\n var a = new Uint16Array([0xFFCC]);\n var b = new Uint8Array(a.buffer, a.byteOffset, a.byteLength);\n if (b[0] === 0xFF) {\n return 'big';\n }\n if (b[0] === 0xCC) {\n return 'little';\n }\n return 'unknown';\n })();\n /**\n * Creates an object for sending to a web worker modifying properties that are TypedArrays\n * into a new object with seperated properties for the buffer, byteOffset, and byteLength.\n *\n * @param {Object} message\n * Object of properties and values to send to the web worker\n * @return {Object}\n * Modified message with TypedArray values expanded\n * @function createTransferableMessage\n */\n\n const createTransferableMessage = function (message) {\n const transferable = {};\n Object.keys(message).forEach(key => {\n const value = message[key];\n if (isArrayBufferView(value)) {\n transferable[key] = {\n bytes: value.buffer,\n byteOffset: value.byteOffset,\n byteLength: value.byteLength\n };\n } else {\n transferable[key] = value;\n }\n });\n return transferable;\n };\n /* global self */\n\n /**\n * Our web worker interface so that things can talk to aes-decrypter\n * that will be running in a web worker. the scope is passed to this by\n * webworkify.\n */\n\n self.onmessage = function (event) {\n const data = event.data;\n const encrypted = new Uint8Array(data.encrypted.bytes, data.encrypted.byteOffset, data.encrypted.byteLength);\n const key = new Uint32Array(data.key.bytes, data.key.byteOffset, data.key.byteLength / 4);\n const iv = new Uint32Array(data.iv.bytes, data.iv.byteOffset, data.iv.byteLength / 4);\n /* eslint-disable no-new, handle-callback-err */\n\n new Decrypter(encrypted, key, iv, function (err, bytes) {\n self.postMessage(createTransferableMessage({\n source: data.source,\n decrypted: bytes\n }), [bytes.buffer]);\n });\n /* eslint-enable */\n };\n}));\nvar Decrypter = factory(workerCode);\n/* rollup-plugin-worker-factory end for worker!/home/runner/work/http-streaming/http-streaming/src/decrypter-worker.js */\n\n/**\n * Convert the properties of an HLS track into an audioTrackKind.\n *\n * @private\n */\n\nconst audioTrackKind_ = properties => {\n let kind = properties.default ? 
'main' : 'alternative';\n if (properties.characteristics && properties.characteristics.indexOf('public.accessibility.describes-video') >= 0) {\n kind = 'main-desc';\n }\n return kind;\n};\n/**\n * Pause provided segment loader and playlist loader if active\n *\n * @param {SegmentLoader} segmentLoader\n * SegmentLoader to pause\n * @param {Object} mediaType\n * Active media type\n * @function stopLoaders\n */\n\nconst stopLoaders = (segmentLoader, mediaType) => {\n segmentLoader.abort();\n segmentLoader.pause();\n if (mediaType && mediaType.activePlaylistLoader) {\n mediaType.activePlaylistLoader.pause();\n mediaType.activePlaylistLoader = null;\n }\n};\n/**\n * Start loading provided segment loader and playlist loader\n *\n * @param {PlaylistLoader} playlistLoader\n * PlaylistLoader to start loading\n * @param {Object} mediaType\n * Active media type\n * @function startLoaders\n */\n\nconst startLoaders = (playlistLoader, mediaType) => {\n // Segment loader will be started after `loadedmetadata` or `loadedplaylist` from the\n // playlist loader\n mediaType.activePlaylistLoader = playlistLoader;\n playlistLoader.load();\n};\n/**\n * Returns a function to be called when the media group changes. It performs a\n * non-destructive (preserve the buffer) resync of the SegmentLoader. This is because a\n * change of group is merely a rendition switch of the same content at another encoding,\n * rather than a change of content, such as switching audio from English to Spanish.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Handler for a non-destructive resync of SegmentLoader when the active media\n * group changes.\n * @function onGroupChanged\n */\n\nconst onGroupChanged = (type, settings) => () => {\n const {\n segmentLoaders: {\n [type]: segmentLoader,\n main: mainSegmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n const activeTrack = mediaType.activeTrack();\n const activeGroup = mediaType.getActiveGroup();\n const previousActiveLoader = mediaType.activePlaylistLoader;\n const lastGroup = mediaType.lastGroup_; // the group did not change do nothing\n\n if (activeGroup && lastGroup && activeGroup.id === lastGroup.id) {\n return;\n }\n mediaType.lastGroup_ = activeGroup;\n mediaType.lastTrack_ = activeTrack;\n stopLoaders(segmentLoader, mediaType);\n if (!activeGroup || activeGroup.isMainPlaylist) {\n // there is no group active or active group is a main playlist and won't change\n return;\n }\n if (!activeGroup.playlistLoader) {\n if (previousActiveLoader) {\n // The previous group had a playlist loader but the new active group does not\n // this means we are switching from demuxed to muxed audio. In this case we want to\n // do a destructive reset of the main segment loader and not restart the audio\n // loaders.\n mainSegmentLoader.resetEverything();\n }\n return;\n } // Non-destructive resync\n\n segmentLoader.resyncLoader();\n startLoaders(activeGroup.playlistLoader, mediaType);\n};\nconst onGroupChanging = (type, settings) => () => {\n const {\n segmentLoaders: {\n [type]: segmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n mediaType.lastGroup_ = null;\n segmentLoader.abort();\n segmentLoader.pause();\n};\n/**\n * Returns a function to be called when the media track changes. 
It performs a\n * destructive reset of the SegmentLoader to ensure we start loading as close to\n * currentTime as possible.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Handler for a destructive reset of SegmentLoader when the active media\n * track changes.\n * @function onTrackChanged\n */\n\nconst onTrackChanged = (type, settings) => () => {\n const {\n mainPlaylistLoader,\n segmentLoaders: {\n [type]: segmentLoader,\n main: mainSegmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n const activeTrack = mediaType.activeTrack();\n const activeGroup = mediaType.getActiveGroup();\n const previousActiveLoader = mediaType.activePlaylistLoader;\n const lastTrack = mediaType.lastTrack_; // track did not change, do nothing\n\n if (lastTrack && activeTrack && lastTrack.id === activeTrack.id) {\n return;\n }\n mediaType.lastGroup_ = activeGroup;\n mediaType.lastTrack_ = activeTrack;\n stopLoaders(segmentLoader, mediaType);\n if (!activeGroup) {\n // there is no group active so we do not want to restart loaders\n return;\n }\n if (activeGroup.isMainPlaylist) {\n // track did not change, do nothing\n if (!activeTrack || !lastTrack || activeTrack.id === lastTrack.id) {\n return;\n }\n const pc = settings.vhs.playlistController_;\n const newPlaylist = pc.selectPlaylist(); // media will not change do nothing\n\n if (pc.media() === newPlaylist) {\n return;\n }\n mediaType.logger_(`track change. Switching main audio from ${lastTrack.id} to ${activeTrack.id}`);\n mainPlaylistLoader.pause();\n mainSegmentLoader.resetEverything();\n pc.fastQualityChange_(newPlaylist);\n return;\n }\n if (type === 'AUDIO') {\n if (!activeGroup.playlistLoader) {\n // when switching from demuxed audio/video to muxed audio/video (noted by no\n // playlist loader for the audio group), we want to do a destructive reset of the\n // main segment loader and not restart the audio loaders\n mainSegmentLoader.setAudio(true); // don't have to worry about disabling the audio of the audio segment loader since\n // it should be stopped\n\n mainSegmentLoader.resetEverything();\n return;\n } // although the segment loader is an audio segment loader, call the setAudio\n // function to ensure it is prepared to re-append the init segment (or handle other\n // config changes)\n\n segmentLoader.setAudio(true);\n mainSegmentLoader.setAudio(false);\n }\n if (previousActiveLoader === activeGroup.playlistLoader) {\n // Nothing has actually changed. This can happen because track change events can fire\n // multiple times for a \"single\" change. One for enabling the new active track, and\n // one for disabling the track that was active\n startLoaders(activeGroup.playlistLoader, mediaType);\n return;\n }\n if (segmentLoader.track) {\n // For WebVTT, set the new text track in the segmentloader\n segmentLoader.track(activeTrack);\n } // destructive reset\n\n segmentLoader.resetEverything();\n startLoaders(activeGroup.playlistLoader, mediaType);\n};\nconst onError = {\n /**\n * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters\n * an error.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Error handler. 
Logs warning (or error if the playlist is excluded) to\n * console and switches back to default audio track.\n * @function onError.AUDIO\n */\n AUDIO: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: mediaType\n },\n excludePlaylist\n } = settings; // switch back to default audio track\n\n const activeTrack = mediaType.activeTrack();\n const activeGroup = mediaType.activeGroup();\n const id = (activeGroup.filter(group => group.default)[0] || activeGroup[0]).id;\n const defaultTrack = mediaType.tracks[id];\n if (activeTrack === defaultTrack) {\n // Default track encountered an error. All we can do now is exclude the current\n // rendition and hope another will switch audio groups\n excludePlaylist({\n error: {\n message: 'Problem encountered loading the default audio track.'\n }\n });\n return;\n }\n videojs.log.warn('Problem encountered loading the alternate audio track.' + 'Switching back to default.');\n for (const trackId in mediaType.tracks) {\n mediaType.tracks[trackId].enabled = mediaType.tracks[trackId] === defaultTrack;\n }\n mediaType.onTrackChanged();\n },\n /**\n * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters\n * an error.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Error handler. Logs warning to console and disables the active subtitle track\n * @function onError.SUBTITLES\n */\n SUBTITLES: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n videojs.log.warn('Problem encountered loading the subtitle track.' + 'Disabling subtitle track.');\n const track = mediaType.activeTrack();\n if (track) {\n track.mode = 'disabled';\n }\n mediaType.onTrackChanged();\n }\n};\nconst setupListeners = {\n /**\n * Setup event listeners for audio playlist loader\n *\n * @param {string} type\n * MediaGroup type\n * @param {PlaylistLoader|null} playlistLoader\n * PlaylistLoader to register listeners on\n * @param {Object} settings\n * Object containing required information for media groups\n * @function setupListeners.AUDIO\n */\n AUDIO: (type, playlistLoader, settings) => {\n if (!playlistLoader) {\n // no playlist loader means audio will be muxed with the video\n return;\n }\n const {\n tech,\n requestOptions,\n segmentLoaders: {\n [type]: segmentLoader\n }\n } = settings;\n playlistLoader.on('loadedmetadata', () => {\n const media = playlistLoader.media();\n segmentLoader.playlist(media, requestOptions); // if the video is already playing, or if this isn't a live video and preload\n // permits, start downloading segments\n\n if (!tech.paused() || media.endList && tech.preload() !== 'none') {\n segmentLoader.load();\n }\n });\n playlistLoader.on('loadedplaylist', () => {\n segmentLoader.playlist(playlistLoader.media(), requestOptions); // If the player isn't paused, ensure that the segment loader is running\n\n if (!tech.paused()) {\n segmentLoader.load();\n }\n });\n playlistLoader.on('error', onError[type](type, settings));\n },\n /**\n * Setup event listeners for subtitle playlist loader\n *\n * @param {string} type\n * MediaGroup type\n * @param {PlaylistLoader|null} playlistLoader\n * PlaylistLoader to register listeners on\n * @param {Object} settings\n * Object containing required information for media groups\n * @function setupListeners.SUBTITLES\n */\n SUBTITLES: (type, playlistLoader, settings) => {\n const {\n tech,\n requestOptions,\n segmentLoaders: {\n [type]: 
segmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n playlistLoader.on('loadedmetadata', () => {\n const media = playlistLoader.media();\n segmentLoader.playlist(media, requestOptions);\n segmentLoader.track(mediaType.activeTrack()); // if the video is already playing, or if this isn't a live video and preload\n // permits, start downloading segments\n\n if (!tech.paused() || media.endList && tech.preload() !== 'none') {\n segmentLoader.load();\n }\n });\n playlistLoader.on('loadedplaylist', () => {\n segmentLoader.playlist(playlistLoader.media(), requestOptions); // If the player isn't paused, ensure that the segment loader is running\n\n if (!tech.paused()) {\n segmentLoader.load();\n }\n });\n playlistLoader.on('error', onError[type](type, settings));\n }\n};\nconst initialize = {\n /**\n * Setup PlaylistLoaders and AudioTracks for the audio groups\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @function initialize.AUDIO\n */\n 'AUDIO': (type, settings) => {\n const {\n vhs,\n sourceType,\n segmentLoaders: {\n [type]: segmentLoader\n },\n requestOptions,\n main: {\n mediaGroups\n },\n mediaTypes: {\n [type]: {\n groups,\n tracks,\n logger_\n }\n },\n mainPlaylistLoader\n } = settings;\n const audioOnlyMain = isAudioOnly(mainPlaylistLoader.main); // force a default if we have none\n\n if (!mediaGroups[type] || Object.keys(mediaGroups[type]).length === 0) {\n mediaGroups[type] = {\n main: {\n default: {\n default: true\n }\n }\n };\n if (audioOnlyMain) {\n mediaGroups[type].main.default.playlists = mainPlaylistLoader.main.playlists;\n }\n }\n for (const groupId in mediaGroups[type]) {\n if (!groups[groupId]) {\n groups[groupId] = [];\n }\n for (const variantLabel in mediaGroups[type][groupId]) {\n let properties = mediaGroups[type][groupId][variantLabel];\n let playlistLoader;\n if (audioOnlyMain) {\n logger_(`AUDIO group '${groupId}' label '${variantLabel}' is a main playlist`);\n properties.isMainPlaylist = true;\n playlistLoader = null; // if vhs-json was provided as the source, and the media playlist was resolved,\n // use the resolved media playlist object\n } else if (sourceType === 'vhs-json' && properties.playlists) {\n playlistLoader = new PlaylistLoader(properties.playlists[0], vhs, requestOptions);\n } else if (properties.resolvedUri) {\n playlistLoader = new PlaylistLoader(properties.resolvedUri, vhs, requestOptions); // TODO: dash isn't the only type with properties.playlists\n // should we even have properties.playlists in this check.\n } else if (properties.playlists && sourceType === 'dash') {\n playlistLoader = new DashPlaylistLoader(properties.playlists[0], vhs, requestOptions, mainPlaylistLoader);\n } else {\n // no resolvedUri means the audio is muxed with the video when using this\n // audio track\n playlistLoader = null;\n }\n properties = merge({\n id: variantLabel,\n playlistLoader\n }, properties);\n setupListeners[type](type, properties.playlistLoader, settings);\n groups[groupId].push(properties);\n if (typeof tracks[variantLabel] === 'undefined') {\n const track = new videojs.AudioTrack({\n id: variantLabel,\n kind: audioTrackKind_(properties),\n enabled: false,\n language: properties.language,\n default: properties.default,\n label: variantLabel\n });\n tracks[variantLabel] = track;\n }\n }\n } // setup single error event handler for the segment loader\n\n segmentLoader.on('error', onError[type](type, settings));\n },\n /**\n * Setup 
PlaylistLoaders and TextTracks for the subtitle groups\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @function initialize.SUBTITLES\n */\n 'SUBTITLES': (type, settings) => {\n const {\n tech,\n vhs,\n sourceType,\n segmentLoaders: {\n [type]: segmentLoader\n },\n requestOptions,\n main: {\n mediaGroups\n },\n mediaTypes: {\n [type]: {\n groups,\n tracks\n }\n },\n mainPlaylistLoader\n } = settings;\n for (const groupId in mediaGroups[type]) {\n if (!groups[groupId]) {\n groups[groupId] = [];\n }\n for (const variantLabel in mediaGroups[type][groupId]) {\n if (!vhs.options_.useForcedSubtitles && mediaGroups[type][groupId][variantLabel].forced) {\n // Subtitle playlists with the forced attribute are not selectable in Safari.\n // According to Apple's HLS Authoring Specification:\n // If content has forced subtitles and regular subtitles in a given language,\n // the regular subtitles track in that language MUST contain both the forced\n // subtitles and the regular subtitles for that language.\n // Because of this requirement and that Safari does not add forced subtitles,\n // forced subtitles are skipped here to maintain consistent experience across\n // all platforms\n continue;\n }\n let properties = mediaGroups[type][groupId][variantLabel];\n let playlistLoader;\n if (sourceType === 'hls') {\n playlistLoader = new PlaylistLoader(properties.resolvedUri, vhs, requestOptions);\n } else if (sourceType === 'dash') {\n const playlists = properties.playlists.filter(p => p.excludeUntil !== Infinity);\n if (!playlists.length) {\n return;\n }\n playlistLoader = new DashPlaylistLoader(properties.playlists[0], vhs, requestOptions, mainPlaylistLoader);\n } else if (sourceType === 'vhs-json') {\n playlistLoader = new PlaylistLoader(\n // if the vhs-json object included the media playlist, use the media playlist\n // as provided, otherwise use the resolved URI to load the playlist\n properties.playlists ? 
properties.playlists[0] : properties.resolvedUri, vhs, requestOptions);\n }\n properties = merge({\n id: variantLabel,\n playlistLoader\n }, properties);\n setupListeners[type](type, properties.playlistLoader, settings);\n groups[groupId].push(properties);\n if (typeof tracks[variantLabel] === 'undefined') {\n const track = tech.addRemoteTextTrack({\n id: variantLabel,\n kind: 'subtitles',\n default: properties.default && properties.autoselect,\n language: properties.language,\n label: variantLabel\n }, false).track;\n tracks[variantLabel] = track;\n }\n }\n } // setup single error event handler for the segment loader\n\n segmentLoader.on('error', onError[type](type, settings));\n },\n /**\n * Setup TextTracks for the closed-caption groups\n *\n * @param {String} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @function initialize['CLOSED-CAPTIONS']\n */\n 'CLOSED-CAPTIONS': (type, settings) => {\n const {\n tech,\n main: {\n mediaGroups\n },\n mediaTypes: {\n [type]: {\n groups,\n tracks\n }\n }\n } = settings;\n for (const groupId in mediaGroups[type]) {\n if (!groups[groupId]) {\n groups[groupId] = [];\n }\n for (const variantLabel in mediaGroups[type][groupId]) {\n const properties = mediaGroups[type][groupId][variantLabel]; // Look for either 608 (CCn) or 708 (SERVICEn) caption services\n\n if (!/^(?:CC|SERVICE)/.test(properties.instreamId)) {\n continue;\n }\n const captionServices = tech.options_.vhs && tech.options_.vhs.captionServices || {};\n let newProps = {\n label: variantLabel,\n language: properties.language,\n instreamId: properties.instreamId,\n default: properties.default && properties.autoselect\n };\n if (captionServices[newProps.instreamId]) {\n newProps = merge(newProps, captionServices[newProps.instreamId]);\n }\n if (newProps.default === undefined) {\n delete newProps.default;\n } // No PlaylistLoader is required for Closed-Captions because the captions are\n // embedded within the video stream\n\n groups[groupId].push(merge({\n id: variantLabel\n }, properties));\n if (typeof tracks[variantLabel] === 'undefined') {\n const track = tech.addRemoteTextTrack({\n id: newProps.instreamId,\n kind: 'captions',\n default: newProps.default,\n language: newProps.language,\n label: newProps.label\n }, false).track;\n tracks[variantLabel] = track;\n }\n }\n }\n }\n};\nconst groupMatch = (list, media) => {\n for (let i = 0; i < list.length; i++) {\n if (playlistMatch(media, list[i])) {\n return true;\n }\n if (list[i].playlists && groupMatch(list[i].playlists, media)) {\n return true;\n }\n }\n return false;\n};\n/**\n * Returns a function used to get the active group of the provided type\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Function that returns the active media group for the provided type. Takes an\n * optional parameter {TextTrack} track. 
If no track is provided, a list of all\n * variants in the group, otherwise the variant corresponding to the provided\n * track is returned.\n * @function activeGroup\n */\n\nconst activeGroup = (type, settings) => track => {\n const {\n mainPlaylistLoader,\n mediaTypes: {\n [type]: {\n groups\n }\n }\n } = settings;\n const media = mainPlaylistLoader.media();\n if (!media) {\n return null;\n }\n let variants = null; // set to variants to main media active group\n\n if (media.attributes[type]) {\n variants = groups[media.attributes[type]];\n }\n const groupKeys = Object.keys(groups);\n if (!variants) {\n // find the mainPlaylistLoader media\n // that is in a media group if we are dealing\n // with audio only\n if (type === 'AUDIO' && groupKeys.length > 1 && isAudioOnly(settings.main)) {\n for (let i = 0; i < groupKeys.length; i++) {\n const groupPropertyList = groups[groupKeys[i]];\n if (groupMatch(groupPropertyList, media)) {\n variants = groupPropertyList;\n break;\n }\n } // use the main group if it exists\n } else if (groups.main) {\n variants = groups.main; // only one group, use that one\n } else if (groupKeys.length === 1) {\n variants = groups[groupKeys[0]];\n }\n }\n if (typeof track === 'undefined') {\n return variants;\n }\n if (track === null || !variants) {\n // An active track was specified so a corresponding group is expected. track === null\n // means no track is currently active so there is no corresponding group\n return null;\n }\n return variants.filter(props => props.id === track.id)[0] || null;\n};\nconst activeTrack = {\n /**\n * Returns a function used to get the active track of type provided\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Function that returns the active media track for the provided type. Returns\n * null if no track is active\n * @function activeTrack.AUDIO\n */\n AUDIO: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: {\n tracks\n }\n }\n } = settings;\n for (const id in tracks) {\n if (tracks[id].enabled) {\n return tracks[id];\n }\n }\n return null;\n },\n /**\n * Returns a function used to get the active track of type provided\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Function that returns the active media track for the provided type. 
Returns\n * null if no track is active\n * @function activeTrack.SUBTITLES\n */\n SUBTITLES: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: {\n tracks\n }\n }\n } = settings;\n for (const id in tracks) {\n if (tracks[id].mode === 'showing' || tracks[id].mode === 'hidden') {\n return tracks[id];\n }\n }\n return null;\n }\n};\nconst getActiveGroup = (type, {\n mediaTypes\n}) => () => {\n const activeTrack_ = mediaTypes[type].activeTrack();\n if (!activeTrack_) {\n return null;\n }\n return mediaTypes[type].activeGroup(activeTrack_);\n};\n/**\n * Setup PlaylistLoaders and Tracks for media groups (Audio, Subtitles,\n * Closed-Captions) specified in the main manifest.\n *\n * @param {Object} settings\n * Object containing required information for setting up the media groups\n * @param {Tech} settings.tech\n * The tech of the player\n * @param {Object} settings.requestOptions\n * XHR request options used by the segment loaders\n * @param {PlaylistLoader} settings.mainPlaylistLoader\n * PlaylistLoader for the main source\n * @param {VhsHandler} settings.vhs\n * VHS SourceHandler\n * @param {Object} settings.main\n * The parsed main manifest\n * @param {Object} settings.mediaTypes\n * Object to store the loaders, tracks, and utility methods for each media type\n * @param {Function} settings.excludePlaylist\n * Excludes the current rendition and forces a rendition switch.\n * @function setupMediaGroups\n */\n\nconst setupMediaGroups = settings => {\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(type => {\n initialize[type](type, settings);\n });\n const {\n mediaTypes,\n mainPlaylistLoader,\n tech,\n vhs,\n segmentLoaders: {\n ['AUDIO']: audioSegmentLoader,\n main: mainSegmentLoader\n }\n } = settings; // setup active group and track getters and change event handlers\n\n ['AUDIO', 'SUBTITLES'].forEach(type => {\n mediaTypes[type].activeGroup = activeGroup(type, settings);\n mediaTypes[type].activeTrack = activeTrack[type](type, settings);\n mediaTypes[type].onGroupChanged = onGroupChanged(type, settings);\n mediaTypes[type].onGroupChanging = onGroupChanging(type, settings);\n mediaTypes[type].onTrackChanged = onTrackChanged(type, settings);\n mediaTypes[type].getActiveGroup = getActiveGroup(type, settings);\n }); // DO NOT enable the default subtitle or caption track.\n // DO enable the default audio track\n\n const audioGroup = mediaTypes.AUDIO.activeGroup();\n if (audioGroup) {\n const groupId = (audioGroup.filter(group => group.default)[0] || audioGroup[0]).id;\n mediaTypes.AUDIO.tracks[groupId].enabled = true;\n mediaTypes.AUDIO.onGroupChanged();\n mediaTypes.AUDIO.onTrackChanged();\n const activeAudioGroup = mediaTypes.AUDIO.getActiveGroup(); // a similar check for handling setAudio on each loader is run again each time the\n // track is changed, but needs to be handled here since the track may not be considered\n // changed on the first call to onTrackChanged\n\n if (!activeAudioGroup.playlistLoader) {\n // either audio is muxed with video or the stream is audio only\n mainSegmentLoader.setAudio(true);\n } else {\n // audio is demuxed\n mainSegmentLoader.setAudio(false);\n audioSegmentLoader.setAudio(true);\n }\n }\n mainPlaylistLoader.on('mediachange', () => {\n ['AUDIO', 'SUBTITLES'].forEach(type => mediaTypes[type].onGroupChanged());\n });\n mainPlaylistLoader.on('mediachanging', () => {\n ['AUDIO', 'SUBTITLES'].forEach(type => mediaTypes[type].onGroupChanging());\n }); // custom audio track change event handler for usage event\n\n const onAudioTrackChanged = () => 
{\n mediaTypes.AUDIO.onTrackChanged();\n tech.trigger({\n type: 'usage',\n name: 'vhs-audio-change'\n });\n };\n tech.audioTracks().addEventListener('change', onAudioTrackChanged);\n tech.remoteTextTracks().addEventListener('change', mediaTypes.SUBTITLES.onTrackChanged);\n vhs.on('dispose', () => {\n tech.audioTracks().removeEventListener('change', onAudioTrackChanged);\n tech.remoteTextTracks().removeEventListener('change', mediaTypes.SUBTITLES.onTrackChanged);\n }); // clear existing audio tracks and add the ones we just created\n\n tech.clearTracks('audio');\n for (const id in mediaTypes.AUDIO.tracks) {\n tech.audioTracks().addTrack(mediaTypes.AUDIO.tracks[id]);\n }\n};\n/**\n * Creates skeleton object used to store the loaders, tracks, and utility methods for each\n * media type\n *\n * @return {Object}\n * Object to store the loaders, tracks, and utility methods for each media type\n * @function createMediaTypes\n */\n\nconst createMediaTypes = () => {\n const mediaTypes = {};\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(type => {\n mediaTypes[type] = {\n groups: {},\n tracks: {},\n activePlaylistLoader: null,\n activeGroup: noop,\n activeTrack: noop,\n getActiveGroup: noop,\n onGroupChanged: noop,\n onTrackChanged: noop,\n lastTrack_: null,\n logger_: logger(`MediaGroups[${type}]`)\n };\n });\n return mediaTypes;\n};\n\n/**\n * A utility class for setting properties and maintaining the state of the content steering manifest.\n *\n * Content Steering manifest format:\n * VERSION: number (required) currently only version 1 is supported.\n * TTL: number in seconds (optional) until the next content steering manifest reload.\n * RELOAD-URI: string (optional) uri to fetch the next content steering manifest.\n * SERVICE-LOCATION-PRIORITY or PATHWAY-PRIORITY a non empty array of unique string values.\n * PATHWAY-CLONES: array (optional) (HLS only) pathway clone objects to copy from other playlists.\n */\n\nclass SteeringManifest {\n constructor() {\n this.priority_ = [];\n this.pathwayClones_ = new Map();\n }\n set version(number) {\n // Only version 1 is currently supported for both DASH and HLS.\n if (number === 1) {\n this.version_ = number;\n }\n }\n set ttl(seconds) {\n // TTL = time-to-live, default = 300 seconds.\n this.ttl_ = seconds || 300;\n }\n set reloadUri(uri) {\n if (uri) {\n // reload URI can be relative to the previous reloadUri.\n this.reloadUri_ = resolveUrl(this.reloadUri_, uri);\n }\n }\n set priority(array) {\n // priority must be non-empty and unique values.\n if (array && array.length) {\n this.priority_ = array;\n }\n }\n set pathwayClones(array) {\n // pathwayClones must be non-empty.\n if (array && array.length) {\n this.pathwayClones_ = new Map(array.map(clone => [clone.ID, clone]));\n }\n }\n get version() {\n return this.version_;\n }\n get ttl() {\n return this.ttl_;\n }\n get reloadUri() {\n return this.reloadUri_;\n }\n get priority() {\n return this.priority_;\n }\n get pathwayClones() {\n return this.pathwayClones_;\n }\n}\n/**\n * This class represents a content steering manifest and associated state. 
See both HLS and DASH specifications.\n * HLS: https://developer.apple.com/streaming/HLSContentSteeringSpecification.pdf and\n * https://datatracker.ietf.org/doc/draft-pantos-hls-rfc8216bis/ section 4.4.6.6.\n * DASH: https://dashif.org/docs/DASH-IF-CTS-00XX-Content-Steering-Community-Review.pdf\n *\n * @param {function} xhr for making a network request from the browser.\n * @param {function} bandwidth for fetching the current bandwidth from the main segment loader.\n */\n\nclass ContentSteeringController extends videojs.EventTarget {\n constructor(xhr, bandwidth) {\n super();\n this.currentPathway = null;\n this.defaultPathway = null;\n this.queryBeforeStart = false;\n this.availablePathways_ = new Set();\n this.steeringManifest = new SteeringManifest();\n this.proxyServerUrl_ = null;\n this.manifestType_ = null;\n this.ttlTimeout_ = null;\n this.request_ = null;\n this.currentPathwayClones = new Map();\n this.nextPathwayClones = new Map();\n this.excludedSteeringManifestURLs = new Set();\n this.logger_ = logger('Content Steering');\n this.xhr_ = xhr;\n this.getBandwidth_ = bandwidth;\n }\n /**\n * Assigns the content steering tag properties to the steering controller\n *\n * @param {string} baseUrl the baseURL from the main manifest for resolving the steering manifest url\n * @param {Object} steeringTag the content steering tag from the main manifest\n */\n\n assignTagProperties(baseUrl, steeringTag) {\n this.manifestType_ = steeringTag.serverUri ? 'HLS' : 'DASH'; // serverUri is HLS; serverURL is DASH\n\n const steeringUri = steeringTag.serverUri || steeringTag.serverURL;\n if (!steeringUri) {\n this.logger_(`steering manifest URL is ${steeringUri}, cannot request steering manifest.`);\n this.trigger('error');\n return;\n } // Content steering manifests can be encoded as a data URI. We can decode, parse and return early if that's the case.\n\n if (steeringUri.startsWith('data:')) {\n this.decodeDataUriManifest_(steeringUri.substring(steeringUri.indexOf(',') + 1));\n return;\n } // reloadUri is the resolution of the main manifest URL and steering URL.\n\n this.steeringManifest.reloadUri = resolveUrl(baseUrl, steeringUri); // pathwayId is HLS; defaultServiceLocation is DASH\n\n this.defaultPathway = steeringTag.pathwayId || steeringTag.defaultServiceLocation; // currently only DASH supports the following properties on tags.\n\n this.queryBeforeStart = steeringTag.queryBeforeStart;\n this.proxyServerUrl_ = steeringTag.proxyServerURL; // trigger a steering event if we have a pathway from the content steering tag.\n // this tells VHS which segment pathway to start with.\n // If queryBeforeStart is true we need to wait for the steering manifest response.\n\n if (this.defaultPathway && !this.queryBeforeStart) {\n this.trigger('content-steering');\n }\n }\n /**\n * Requests the content steering manifest and parses the response. This should only be called after\n * assignTagProperties was called with a content steering tag.\n *\n * @param {boolean} initial whether this is the initial request for the steering manifest.\n * If set, the request is made with exactly the stored reload URI.\n * This scenario should only happen once on initialization.\n */\n\n requestSteeringManifest(initial) {\n const reloadUri = this.steeringManifest.reloadUri;\n if (!reloadUri) {\n return;\n } // We currently don't support passing MPD query parameters directly to the content steering URL as this requires\n // ExtUrlQueryInfo tag support. 
See the DASH content steering spec section 8.1.\n // This request URI accounts for manifest URIs that have been excluded.\n\n const uri = initial ? reloadUri : this.getRequestURI(reloadUri); // If there are no valid manifest URIs, we should stop content steering.\n\n if (!uri) {\n this.logger_('No valid content steering manifest URIs. Stopping content steering.');\n this.trigger('error');\n this.dispose();\n return;\n }\n const metadata = {\n contentSteeringInfo: {\n uri\n }\n };\n this.trigger({\n type: 'contentsteeringloadstart',\n metadata\n });\n this.request_ = this.xhr_({\n uri,\n requestType: 'content-steering-manifest'\n }, (error, errorInfo) => {\n if (error) {\n // If the client receives HTTP 410 Gone in response to a manifest request,\n // it MUST NOT issue another request for that URI for the remainder of the\n // playback session. It MAY continue to use the most-recently obtained set\n // of Pathways.\n if (errorInfo.status === 410) {\n this.logger_(`manifest request 410 ${error}.`);\n this.logger_(`There will be no more content steering requests to ${uri} this session.`);\n this.excludedSteeringManifestURLs.add(uri);\n return;\n } // If the client receives HTTP 429 Too Many Requests with a Retry-After\n // header in response to a manifest request, it SHOULD wait until the time\n // specified by the Retry-After header to reissue the request.\n\n if (errorInfo.status === 429) {\n const retrySeconds = errorInfo.responseHeaders['retry-after'];\n this.logger_(`manifest request 429 ${error}.`);\n this.logger_(`content steering will retry in ${retrySeconds} seconds.`);\n this.startTTLTimeout_(parseInt(retrySeconds, 10));\n return;\n } // If the Steering Manifest cannot be loaded and parsed correctly, the\n // client SHOULD continue to use the previous values and attempt to reload\n // it after waiting for the previously-specified TTL (or 5 minutes if\n // none).\n\n this.logger_(`manifest failed to load ${error}.`);\n this.startTTLTimeout_();\n return;\n }\n this.trigger({\n type: 'contentsteeringloadcomplete',\n metadata\n });\n let steeringManifestJson;\n try {\n steeringManifestJson = JSON.parse(this.request_.responseText);\n } catch (parseError) {\n const errorMetadata = {\n errorType: videojs.Error.StreamingContentSteeringParserError,\n error: parseError\n };\n this.trigger({\n type: 'error',\n metadata: errorMetadata\n });\n }\n this.assignSteeringProperties_(steeringManifestJson);\n const parsedMetadata = {\n contentSteeringInfo: metadata.contentSteeringInfo,\n contentSteeringManifest: {\n version: this.steeringManifest.version,\n reloadUri: this.steeringManifest.reloadUri,\n priority: this.steeringManifest.priority\n }\n };\n this.trigger({\n type: 'contentsteeringparsed',\n metadata: parsedMetadata\n });\n this.startTTLTimeout_();\n });\n }\n /**\n * Set the proxy server URL and add the steering manifest url as a URI encoded parameter.\n *\n * @param {string} steeringUrl the steering manifest url\n * @return the steering manifest url to a proxy server with all parameters set\n */\n\n setProxyServerUrl_(steeringUrl) {\n const steeringUrlObject = new window$1.URL(steeringUrl);\n const proxyServerUrlObject = new window$1.URL(this.proxyServerUrl_);\n proxyServerUrlObject.searchParams.set('url', encodeURI(steeringUrlObject.toString()));\n return this.setSteeringParams_(proxyServerUrlObject.toString());\n }\n /**\n * Decodes and parses the data uri encoded steering manifest\n *\n * @param {string} dataUri the data uri to be decoded and parsed.\n */\n\n 
decodeDataUriManifest_(dataUri) {\n const steeringManifestJson = JSON.parse(window$1.atob(dataUri));\n this.assignSteeringProperties_(steeringManifestJson);\n }\n /**\n * Set the HLS or DASH content steering manifest request query parameters. For example:\n * _HLS_pathway=\"\" and _HLS_throughput=\n * _DASH_pathway and _DASH_throughput\n *\n * @param {string} url the url to add content steering query parameters to.\n * @return a new url as a string with the added steering query parameters.\n */\n\n setSteeringParams_(url) {\n const urlObject = new window$1.URL(url);\n const path = this.getPathway();\n const networkThroughput = this.getBandwidth_();\n if (path) {\n const pathwayKey = `_${this.manifestType_}_pathway`;\n urlObject.searchParams.set(pathwayKey, path);\n }\n if (networkThroughput) {\n const throughputKey = `_${this.manifestType_}_throughput`;\n urlObject.searchParams.set(throughputKey, networkThroughput);\n }\n return urlObject.toString();\n }\n /**\n * Assigns the current steering manifest properties to the SteeringManifest object\n *\n * @param {Object} steeringJson the raw JSON steering manifest\n */\n\n assignSteeringProperties_(steeringJson) {\n this.steeringManifest.version = steeringJson.VERSION;\n if (!this.steeringManifest.version) {\n this.logger_(`manifest version is ${steeringJson.VERSION}, which is not supported.`);\n this.trigger('error');\n return;\n }\n this.steeringManifest.ttl = steeringJson.TTL;\n this.steeringManifest.reloadUri = steeringJson['RELOAD-URI']; // HLS = PATHWAY-PRIORITY required. DASH = SERVICE-LOCATION-PRIORITY optional\n\n this.steeringManifest.priority = steeringJson['PATHWAY-PRIORITY'] || steeringJson['SERVICE-LOCATION-PRIORITY']; // Pathway clones to be created/updated in HLS.\n // See section 7.2 https://datatracker.ietf.org/doc/draft-pantos-hls-rfc8216bis/\n\n this.steeringManifest.pathwayClones = steeringJson['PATHWAY-CLONES'];\n this.nextPathwayClones = this.steeringManifest.pathwayClones; // 1. apply first pathway from the array.\n // 2. if first pathway doesn't exist in manifest, try next pathway.\n // a. if all pathways are exhausted, ignore the steering manifest priority.\n // 3. if segments fail from an established pathway, try all variants/renditions, then exclude the failed pathway.\n // a. exclude a pathway for a minimum of the last TTL duration. Meaning, from the next steering response,\n // the excluded pathway will be ignored.\n // See excludePathway usage in excludePlaylist().\n // If there are no available pathways, we need to stop content steering.\n\n if (!this.availablePathways_.size) {\n this.logger_('There are no available pathways for content steering. 
Ending content steering.');\n this.trigger('error');\n this.dispose();\n }\n const chooseNextPathway = pathwaysByPriority => {\n for (const path of pathwaysByPriority) {\n if (this.availablePathways_.has(path)) {\n return path;\n }\n } // If no pathway matches, ignore the manifest and choose the first available.\n\n return [...this.availablePathways_][0];\n };\n const nextPathway = chooseNextPathway(this.steeringManifest.priority);\n if (this.currentPathway !== nextPathway) {\n this.currentPathway = nextPathway;\n this.trigger('content-steering');\n }\n }\n /**\n * Returns the pathway to use for steering decisions\n *\n * @return {string} returns the current pathway or the default\n */\n\n getPathway() {\n return this.currentPathway || this.defaultPathway;\n }\n /**\n * Chooses the manifest request URI based on proxy URIs and server URLs.\n * Also accounts for exclusion on certain manifest URIs.\n *\n * @param {string} reloadUri the base uri before parameters\n *\n * @return {string} the final URI for the request to the manifest server.\n */\n\n getRequestURI(reloadUri) {\n if (!reloadUri) {\n return null;\n }\n const isExcluded = uri => this.excludedSteeringManifestURLs.has(uri);\n if (this.proxyServerUrl_) {\n const proxyURI = this.setProxyServerUrl_(reloadUri);\n if (!isExcluded(proxyURI)) {\n return proxyURI;\n }\n }\n const steeringURI = this.setSteeringParams_(reloadUri);\n if (!isExcluded(steeringURI)) {\n return steeringURI;\n } // Return nothing if all valid manifest URIs are excluded.\n\n return null;\n }\n /**\n * Start the timeout for re-requesting the steering manifest at the TTL interval.\n *\n * @param {number} ttl time in seconds of the timeout. Defaults to the\n * ttl interval in the steering manifest\n */\n\n startTTLTimeout_(ttl = this.steeringManifest.ttl) {\n // 300 (5 minutes) is the default value.\n const ttlMS = ttl * 1000;\n this.ttlTimeout_ = window$1.setTimeout(() => {\n this.requestSteeringManifest();\n }, ttlMS);\n }\n /**\n * Clear the TTL timeout if necessary.\n */\n\n clearTTLTimeout_() {\n window$1.clearTimeout(this.ttlTimeout_);\n this.ttlTimeout_ = null;\n }\n /**\n * aborts any current steering xhr and sets the current request object to null\n */\n\n abort() {\n if (this.request_) {\n this.request_.abort();\n }\n this.request_ = null;\n }\n /**\n * aborts steering requests clears the ttl timeout and resets all properties.\n */\n\n dispose() {\n this.off('content-steering');\n this.off('error');\n this.abort();\n this.clearTTLTimeout_();\n this.currentPathway = null;\n this.defaultPathway = null;\n this.queryBeforeStart = null;\n this.proxyServerUrl_ = null;\n this.manifestType_ = null;\n this.ttlTimeout_ = null;\n this.request_ = null;\n this.excludedSteeringManifestURLs = new Set();\n this.availablePathways_ = new Set();\n this.steeringManifest = new SteeringManifest();\n }\n /**\n * adds a pathway to the available pathways set\n *\n * @param {string} pathway the pathway string to add\n */\n\n addAvailablePathway(pathway) {\n if (pathway) {\n this.availablePathways_.add(pathway);\n }\n }\n /**\n * Clears all pathways from the available pathways set\n */\n\n clearAvailablePathways() {\n this.availablePathways_.clear();\n }\n /**\n * Removes a pathway from the available pathways set.\n */\n\n excludePathway(pathway) {\n return this.availablePathways_.delete(pathway);\n }\n /**\n * Checks the refreshed DASH manifest content steering tag for changes.\n *\n * @param {string} baseURL new steering tag on DASH manifest refresh\n * @param {Object} newTag the new 
tag to check for changes\n * @return {boolean} whether the new tag has different values\n */\n\n didDASHTagChange(baseURL, newTag) {\n return !newTag && this.steeringManifest.reloadUri || newTag && (resolveUrl(baseURL, newTag.serverURL) !== this.steeringManifest.reloadUri || newTag.defaultServiceLocation !== this.defaultPathway || newTag.queryBeforeStart !== this.queryBeforeStart || newTag.proxyServerURL !== this.proxyServerUrl_);\n }\n getAvailablePathways() {\n return this.availablePathways_;\n }\n}\nconst ABORT_EARLY_EXCLUSION_SECONDS = 10;\nlet Vhs$1; // SegmentLoader stats that need to have each loader's\n// values summed to calculate the final value\n\nconst loaderStats = ['mediaRequests', 'mediaRequestsAborted', 'mediaRequestsTimedout', 'mediaRequestsErrored', 'mediaTransferDuration', 'mediaBytesTransferred', 'mediaAppends'];\nconst sumLoaderStat = function (stat) {\n return this.audioSegmentLoader_[stat] + this.mainSegmentLoader_[stat];\n};\nconst shouldSwitchToMedia = function ({\n currentPlaylist,\n buffered,\n currentTime,\n nextPlaylist,\n bufferLowWaterLine,\n bufferHighWaterLine,\n duration,\n bufferBasedABR,\n log\n}) {\n // we have no other playlist to switch to\n if (!nextPlaylist) {\n videojs.log.warn('We received no playlist to switch to. Please check your stream.');\n return false;\n }\n const sharedLogLine = `allowing switch ${currentPlaylist && currentPlaylist.id || 'null'} -> ${nextPlaylist.id}`;\n if (!currentPlaylist) {\n log(`${sharedLogLine} as current playlist is not set`);\n return true;\n } // no need to switch if playlist is the same\n\n if (nextPlaylist.id === currentPlaylist.id) {\n return false;\n } // determine if current time is in a buffered range.\n\n const isBuffered = Boolean(findRange(buffered, currentTime).length); // If the playlist is live, then we don't want to take the low water line into account.\n // This is because in LIVE, the player plays 3 segments from the end of the\n // playlist, and if `BUFFER_LOW_WATER_LINE` is greater than the duration available\n // in those segments, a viewer will never experience a rendition upswitch.\n\n if (!currentPlaylist.endList) {\n // For LLHLS live streams, don't switch renditions before playback has started, as it almost\n // doubles the time to first playback.\n if (!isBuffered && typeof currentPlaylist.partTargetDuration === 'number') {\n log(`not ${sharedLogLine} as current playlist is live llhls, but currentTime isn't in buffered.`);\n return false;\n }\n log(`${sharedLogLine} as current playlist is live`);\n return true;\n }\n const forwardBuffer = timeAheadOf(buffered, currentTime);\n const maxBufferLowWaterLine = bufferBasedABR ? 
Config.EXPERIMENTAL_MAX_BUFFER_LOW_WATER_LINE : Config.MAX_BUFFER_LOW_WATER_LINE; // For the same reason as LIVE, we ignore the low water line when the VOD\n // duration is below the max potential low water line\n\n if (duration < maxBufferLowWaterLine) {\n log(`${sharedLogLine} as duration < max low water line (${duration} < ${maxBufferLowWaterLine})`);\n return true;\n }\n const nextBandwidth = nextPlaylist.attributes.BANDWIDTH;\n const currBandwidth = currentPlaylist.attributes.BANDWIDTH; // when switching down, if our buffer is lower than the high water line,\n // we can switch down\n\n if (nextBandwidth < currBandwidth && (!bufferBasedABR || forwardBuffer < bufferHighWaterLine)) {\n let logLine = `${sharedLogLine} as next bandwidth < current bandwidth (${nextBandwidth} < ${currBandwidth})`;\n if (bufferBasedABR) {\n logLine += ` and forwardBuffer < bufferHighWaterLine (${forwardBuffer} < ${bufferHighWaterLine})`;\n }\n log(logLine);\n return true;\n } // and if our buffer is higher than the low water line,\n // we can switch up\n\n if ((!bufferBasedABR || nextBandwidth > currBandwidth) && forwardBuffer >= bufferLowWaterLine) {\n let logLine = `${sharedLogLine} as forwardBuffer >= bufferLowWaterLine (${forwardBuffer} >= ${bufferLowWaterLine})`;\n if (bufferBasedABR) {\n logLine += ` and next bandwidth > current bandwidth (${nextBandwidth} > ${currBandwidth})`;\n }\n log(logLine);\n return true;\n }\n log(`not ${sharedLogLine} as no switching criteria met`);\n return false;\n};\n/**\n * The main playlist controller controls all interactions\n * between playlists and segment loaders. At this time this mainly\n * involves a main playlist and a series of audio playlists\n * if they are available.\n *\n * @class PlaylistController\n * @extends videojs.EventTarget\n */\n\nclass PlaylistController extends videojs.EventTarget {\n constructor(options) {\n super();\n const {\n src,\n withCredentials,\n tech,\n bandwidth,\n externVhs,\n useCueTags,\n playlistExclusionDuration,\n enableLowInitialPlaylist,\n sourceType,\n cacheEncryptionKeys,\n bufferBasedABR,\n leastPixelDiffSelector,\n captionServices\n } = options;\n if (!src) {\n throw new Error('A non-empty playlist URL or JSON manifest string is required');\n }\n let {\n maxPlaylistRetries\n } = options;\n if (maxPlaylistRetries === null || typeof maxPlaylistRetries === 'undefined') {\n maxPlaylistRetries = Infinity;\n }\n Vhs$1 = externVhs;\n this.bufferBasedABR = Boolean(bufferBasedABR);\n this.leastPixelDiffSelector = Boolean(leastPixelDiffSelector);\n this.withCredentials = withCredentials;\n this.tech_ = tech;\n this.vhs_ = tech.vhs;\n this.player_ = options.player_;\n this.sourceType_ = sourceType;\n this.useCueTags_ = useCueTags;\n this.playlistExclusionDuration = playlistExclusionDuration;\n this.maxPlaylistRetries = maxPlaylistRetries;\n this.enableLowInitialPlaylist = enableLowInitialPlaylist;\n if (this.useCueTags_) {\n this.cueTagsTrack_ = this.tech_.addTextTrack('metadata', 'ad-cues');\n this.cueTagsTrack_.inBandMetadataTrackDispatchType = '';\n }\n this.requestOptions_ = {\n withCredentials,\n maxPlaylistRetries,\n timeout: null\n };\n this.on('error', this.pauseLoading);\n this.mediaTypes_ = createMediaTypes();\n this.mediaSource = new window$1.MediaSource();\n this.handleDurationChange_ = this.handleDurationChange_.bind(this);\n this.handleSourceOpen_ = this.handleSourceOpen_.bind(this);\n this.handleSourceEnded_ = this.handleSourceEnded_.bind(this);\n this.mediaSource.addEventListener('durationchange', 
this.handleDurationChange_); // load the media source into the player\n\n this.mediaSource.addEventListener('sourceopen', this.handleSourceOpen_);\n this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_); // we don't have to handle sourceclose since dispose will handle termination of\n // everything, and the MediaSource should not be detached without a proper disposal\n\n this.seekable_ = createTimeRanges();\n this.hasPlayed_ = false;\n this.syncController_ = new SyncController(options);\n this.segmentMetadataTrack_ = tech.addRemoteTextTrack({\n kind: 'metadata',\n label: 'segment-metadata'\n }, false).track;\n this.decrypter_ = new Decrypter();\n this.sourceUpdater_ = new SourceUpdater(this.mediaSource);\n this.inbandTextTracks_ = {};\n this.timelineChangeController_ = new TimelineChangeController();\n this.keyStatusMap_ = new Map();\n const segmentLoaderSettings = {\n vhs: this.vhs_,\n parse708captions: options.parse708captions,\n useDtsForTimestampOffset: options.useDtsForTimestampOffset,\n captionServices,\n mediaSource: this.mediaSource,\n currentTime: this.tech_.currentTime.bind(this.tech_),\n seekable: () => this.seekable(),\n seeking: () => this.tech_.seeking(),\n duration: () => this.duration(),\n hasPlayed: () => this.hasPlayed_,\n goalBufferLength: () => this.goalBufferLength(),\n bandwidth,\n syncController: this.syncController_,\n decrypter: this.decrypter_,\n sourceType: this.sourceType_,\n inbandTextTracks: this.inbandTextTracks_,\n cacheEncryptionKeys,\n sourceUpdater: this.sourceUpdater_,\n timelineChangeController: this.timelineChangeController_,\n exactManifestTimings: options.exactManifestTimings,\n addMetadataToTextTrack: this.addMetadataToTextTrack.bind(this)\n }; // The source type check not only determines whether a special DASH playlist loader\n // should be used, but also covers the case where the provided src is a vhs-json\n // manifest object (instead of a URL). In the case of vhs-json, the default\n // PlaylistLoader should be used.\n\n this.mainPlaylistLoader_ = this.sourceType_ === 'dash' ? 
new DashPlaylistLoader(src, this.vhs_, merge(this.requestOptions_, {\n addMetadataToTextTrack: this.addMetadataToTextTrack.bind(this)\n })) : new PlaylistLoader(src, this.vhs_, merge(this.requestOptions_, {\n addDateRangesToTextTrack: this.addDateRangesToTextTrack_.bind(this)\n }));\n this.setupMainPlaylistLoaderListeners_(); // setup segment loaders\n // combined audio/video or just video when alternate audio track is selected\n\n this.mainSegmentLoader_ = new SegmentLoader(merge(segmentLoaderSettings, {\n segmentMetadataTrack: this.segmentMetadataTrack_,\n loaderType: 'main'\n }), options); // alternate audio track\n\n this.audioSegmentLoader_ = new SegmentLoader(merge(segmentLoaderSettings, {\n loaderType: 'audio'\n }), options);\n this.subtitleSegmentLoader_ = new VTTSegmentLoader(merge(segmentLoaderSettings, {\n loaderType: 'vtt',\n featuresNativeTextTracks: this.tech_.featuresNativeTextTracks,\n loadVttJs: () => new Promise((resolve, reject) => {\n function onLoad() {\n tech.off('vttjserror', onError);\n resolve();\n }\n function onError() {\n tech.off('vttjsloaded', onLoad);\n reject();\n }\n tech.one('vttjsloaded', onLoad);\n tech.one('vttjserror', onError); // safe to call multiple times, script will be loaded only once:\n\n tech.addWebVttScript_();\n })\n }), options);\n const getBandwidth = () => {\n return this.mainSegmentLoader_.bandwidth;\n };\n this.contentSteeringController_ = new ContentSteeringController(this.vhs_.xhr, getBandwidth);\n this.setupSegmentLoaderListeners_();\n if (this.bufferBasedABR) {\n this.mainPlaylistLoader_.one('loadedplaylist', () => this.startABRTimer_());\n this.tech_.on('pause', () => this.stopABRTimer_());\n this.tech_.on('play', () => this.startABRTimer_());\n } // Create SegmentLoader stat-getters\n // mediaRequests_\n // mediaRequestsAborted_\n // mediaRequestsTimedout_\n // mediaRequestsErrored_\n // mediaTransferDuration_\n // mediaBytesTransferred_\n // mediaAppends_\n\n loaderStats.forEach(stat => {\n this[stat + '_'] = sumLoaderStat.bind(this, stat);\n });\n this.logger_ = logger('pc');\n this.triggeredFmp4Usage = false;\n if (this.tech_.preload() === 'none') {\n this.loadOnPlay_ = () => {\n this.loadOnPlay_ = null;\n this.mainPlaylistLoader_.load();\n };\n this.tech_.one('play', this.loadOnPlay_);\n } else {\n this.mainPlaylistLoader_.load();\n }\n this.timeToLoadedData__ = -1;\n this.mainAppendsToLoadedData__ = -1;\n this.audioAppendsToLoadedData__ = -1;\n const event = this.tech_.preload() === 'none' ? 
'play' : 'loadstart'; // start the first frame timer on loadstart or play (for preload none)\n\n this.tech_.one(event, () => {\n const timeToLoadedDataStart = Date.now();\n this.tech_.one('loadeddata', () => {\n this.timeToLoadedData__ = Date.now() - timeToLoadedDataStart;\n this.mainAppendsToLoadedData__ = this.mainSegmentLoader_.mediaAppends;\n this.audioAppendsToLoadedData__ = this.audioSegmentLoader_.mediaAppends;\n });\n });\n }\n mainAppendsToLoadedData_() {\n return this.mainAppendsToLoadedData__;\n }\n audioAppendsToLoadedData_() {\n return this.audioAppendsToLoadedData__;\n }\n appendsToLoadedData_() {\n const main = this.mainAppendsToLoadedData_();\n const audio = this.audioAppendsToLoadedData_();\n if (main === -1 || audio === -1) {\n return -1;\n }\n return main + audio;\n }\n timeToLoadedData_() {\n return this.timeToLoadedData__;\n }\n /**\n * Run selectPlaylist and switch to the new playlist if we should\n *\n * @param {string} [reason=abr] a reason for why the ABR check is made\n * @private\n */\n\n checkABR_(reason = 'abr') {\n const nextPlaylist = this.selectPlaylist();\n if (nextPlaylist && this.shouldSwitchToMedia_(nextPlaylist)) {\n this.switchMedia_(nextPlaylist, reason);\n }\n }\n switchMedia_(playlist, cause, delay) {\n const oldMedia = this.media();\n const oldId = oldMedia && (oldMedia.id || oldMedia.uri);\n const newId = playlist && (playlist.id || playlist.uri);\n if (oldId && oldId !== newId) {\n this.logger_(`switch media ${oldId} -> ${newId} from ${cause}`);\n const metadata = {\n renditionInfo: {\n id: newId,\n bandwidth: playlist.attributes.BANDWIDTH,\n resolution: playlist.attributes.RESOLUTION,\n codecs: playlist.attributes.CODECS\n },\n cause\n };\n this.trigger({\n type: 'renditionselected',\n metadata\n });\n this.tech_.trigger({\n type: 'usage',\n name: `vhs-rendition-change-${cause}`\n });\n }\n this.mainPlaylistLoader_.media(playlist, delay);\n }\n /**\n * A function that ensures we switch our playlists inside of `mediaTypes`\n * to match the current `serviceLocation` provided by the contentSteering controller.\n * We want to check media types of `AUDIO`, `SUBTITLES`, and `CLOSED-CAPTIONS`.\n *\n * This should only be called on a DASH playback scenario while using content steering.\n * This is necessary due to differences in how media in HLS manifests are generally tied to\n * a video playlist, where in DASH that is not always the case.\n */\n\n switchMediaForDASHContentSteering_() {\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(type => {\n const mediaType = this.mediaTypes_[type];\n const activeGroup = mediaType ? mediaType.activeGroup() : null;\n const pathway = this.contentSteeringController_.getPathway();\n if (activeGroup && pathway) {\n // activeGroup can be an array or a single group\n const mediaPlaylists = activeGroup.length ? 
activeGroup[0].playlists : activeGroup.playlists;\n const dashMediaPlaylists = mediaPlaylists.filter(p => p.attributes.serviceLocation === pathway); // Switch the current active playlist to the correct CDN\n\n if (dashMediaPlaylists.length) {\n this.mediaTypes_[type].activePlaylistLoader.media(dashMediaPlaylists[0]);\n }\n }\n });\n }\n /**\n * Start a timer that periodically calls checkABR_\n *\n * @private\n */\n\n startABRTimer_() {\n this.stopABRTimer_();\n this.abrTimer_ = window$1.setInterval(() => this.checkABR_(), 250);\n }\n /**\n * Stop the timer that periodically calls checkABR_\n *\n * @private\n */\n\n stopABRTimer_() {\n // if we're scrubbing, we don't need to pause.\n // This getter will be added to Video.js in version 7.11.\n if (this.tech_.scrubbing && this.tech_.scrubbing()) {\n return;\n }\n window$1.clearInterval(this.abrTimer_);\n this.abrTimer_ = null;\n }\n /**\n * Get a list of playlists for the currently selected audio playlist\n *\n * @return {Array} the array of audio playlists\n */\n\n getAudioTrackPlaylists_() {\n const main = this.main();\n const defaultPlaylists = main && main.playlists || []; // if we don't have any audio groups then we can only\n // assume that the audio tracks are contained in main\n // playlist array, use that or an empty array.\n\n if (!main || !main.mediaGroups || !main.mediaGroups.AUDIO) {\n return defaultPlaylists;\n }\n const AUDIO = main.mediaGroups.AUDIO;\n const groupKeys = Object.keys(AUDIO);\n let track; // get the current active track\n\n if (Object.keys(this.mediaTypes_.AUDIO.groups).length) {\n track = this.mediaTypes_.AUDIO.activeTrack(); // or get the default track from main if mediaTypes_ isn't setup yet\n } else {\n // default group is `main` or just the first group.\n const defaultGroup = AUDIO.main || groupKeys.length && AUDIO[groupKeys[0]];\n for (const label in defaultGroup) {\n if (defaultGroup[label].default) {\n track = {\n label\n };\n break;\n }\n }\n } // no active track no playlists.\n\n if (!track) {\n return defaultPlaylists;\n }\n const playlists = []; // get all of the playlists that are possible for the\n // active track.\n\n for (const group in AUDIO) {\n if (AUDIO[group][track.label]) {\n const properties = AUDIO[group][track.label];\n if (properties.playlists && properties.playlists.length) {\n playlists.push.apply(playlists, properties.playlists);\n } else if (properties.uri) {\n playlists.push(properties);\n } else if (main.playlists.length) {\n // if an audio group does not have a uri\n // see if we have main playlists that use it as a group.\n // if we do then add those to the playlists list.\n for (let i = 0; i < main.playlists.length; i++) {\n const playlist = main.playlists[i];\n if (playlist.attributes && playlist.attributes.AUDIO && playlist.attributes.AUDIO === group) {\n playlists.push(playlist);\n }\n }\n }\n }\n }\n if (!playlists.length) {\n return defaultPlaylists;\n }\n return playlists;\n }\n /**\n * Register event handlers on the main playlist loader. 
A helper\n * function for construction time.\n *\n * @private\n */\n\n setupMainPlaylistLoaderListeners_() {\n this.mainPlaylistLoader_.on('loadedmetadata', () => {\n const media = this.mainPlaylistLoader_.media();\n const requestTimeout = media.targetDuration * 1.5 * 1000; // If we don't have any more available playlists, we don't want to\n // timeout the request.\n\n if (isLowestEnabledRendition(this.mainPlaylistLoader_.main, this.mainPlaylistLoader_.media())) {\n this.requestOptions_.timeout = 0;\n } else {\n this.requestOptions_.timeout = requestTimeout;\n } // if this isn't a live video and preload permits, start\n // downloading segments\n\n if (media.endList && this.tech_.preload() !== 'none') {\n this.mainSegmentLoader_.playlist(media, this.requestOptions_);\n this.mainSegmentLoader_.load();\n }\n setupMediaGroups({\n sourceType: this.sourceType_,\n segmentLoaders: {\n AUDIO: this.audioSegmentLoader_,\n SUBTITLES: this.subtitleSegmentLoader_,\n main: this.mainSegmentLoader_\n },\n tech: this.tech_,\n requestOptions: this.requestOptions_,\n mainPlaylistLoader: this.mainPlaylistLoader_,\n vhs: this.vhs_,\n main: this.main(),\n mediaTypes: this.mediaTypes_,\n excludePlaylist: this.excludePlaylist.bind(this)\n });\n this.triggerPresenceUsage_(this.main(), media);\n this.setupFirstPlay();\n if (!this.mediaTypes_.AUDIO.activePlaylistLoader || this.mediaTypes_.AUDIO.activePlaylistLoader.media()) {\n this.trigger('selectedinitialmedia');\n } else {\n // We must wait for the active audio playlist loader to\n // finish setting up before triggering this event so the\n // representations API and EME setup is correct\n this.mediaTypes_.AUDIO.activePlaylistLoader.one('loadedmetadata', () => {\n this.trigger('selectedinitialmedia');\n });\n }\n });\n this.mainPlaylistLoader_.on('loadedplaylist', () => {\n if (this.loadOnPlay_) {\n this.tech_.off('play', this.loadOnPlay_);\n }\n let updatedPlaylist = this.mainPlaylistLoader_.media();\n if (!updatedPlaylist) {\n // Add content steering listeners on first load and init.\n this.attachContentSteeringListeners_();\n this.initContentSteeringController_(); // exclude any variants that are not supported by the browser before selecting\n // an initial media as the playlist selectors do not consider browser support\n\n this.excludeUnsupportedVariants_();\n let selectedMedia;\n if (this.enableLowInitialPlaylist) {\n selectedMedia = this.selectInitialPlaylist();\n }\n if (!selectedMedia) {\n selectedMedia = this.selectPlaylist();\n }\n if (!selectedMedia || !this.shouldSwitchToMedia_(selectedMedia)) {\n return;\n }\n this.initialMedia_ = selectedMedia;\n this.switchMedia_(this.initialMedia_, 'initial'); // Under the standard case where a source URL is provided, loadedplaylist will\n // fire again since the playlist will be requested. 
In the case of vhs-json\n // (where the manifest object is provided as the source), when the media\n // playlist's `segments` list is already available, a media playlist won't be\n // requested, and loadedplaylist won't fire again, so the playlist handler must be\n // called on its own here.\n\n const haveJsonSource = this.sourceType_ === 'vhs-json' && this.initialMedia_.segments;\n if (!haveJsonSource) {\n return;\n }\n updatedPlaylist = this.initialMedia_;\n }\n this.handleUpdatedMediaPlaylist(updatedPlaylist);\n });\n this.mainPlaylistLoader_.on('error', () => {\n const error = this.mainPlaylistLoader_.error;\n this.excludePlaylist({\n playlistToExclude: error.playlist,\n error\n });\n });\n this.mainPlaylistLoader_.on('mediachanging', () => {\n this.mainSegmentLoader_.abort();\n this.mainSegmentLoader_.pause();\n });\n this.mainPlaylistLoader_.on('mediachange', () => {\n const media = this.mainPlaylistLoader_.media();\n const requestTimeout = media.targetDuration * 1.5 * 1000; // If we don't have any more available playlists, we don't want to\n // timeout the request.\n\n if (isLowestEnabledRendition(this.mainPlaylistLoader_.main, this.mainPlaylistLoader_.media())) {\n this.requestOptions_.timeout = 0;\n } else {\n this.requestOptions_.timeout = requestTimeout;\n }\n if (this.sourceType_ === 'dash') {\n // we don't want to re-request the same hls playlist right after it was changed\n this.mainPlaylistLoader_.load();\n } // TODO: Create a new event on the PlaylistLoader that signals\n // that the segments have changed in some way and use that to\n // update the SegmentLoader instead of doing it twice here and\n // on `loadedplaylist`\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.playlist(media, this.requestOptions_);\n if (this.waitingForFastQualityPlaylistReceived_) {\n this.runFastQualitySwitch_();\n } else {\n this.mainSegmentLoader_.load();\n }\n this.tech_.trigger({\n type: 'mediachange',\n bubbles: true\n });\n });\n this.mainPlaylistLoader_.on('playlistunchanged', () => {\n const updatedPlaylist = this.mainPlaylistLoader_.media(); // ignore unchanged playlists that have already been\n // excluded for not-changing. We likely just have a really slowly updating\n // playlist.\n\n if (updatedPlaylist.lastExcludeReason_ === 'playlist-unchanged') {\n return;\n }\n const playlistOutdated = this.stuckAtPlaylistEnd_(updatedPlaylist);\n if (playlistOutdated) {\n // Playlist has stopped updating and we're stuck at its end. 
Try to\n // exclude it and switch to another playlist in the hope that that\n // one is updating (and give the player a chance to re-adjust to the\n // safe live point).\n this.excludePlaylist({\n error: {\n message: 'Playlist no longer updating.',\n reason: 'playlist-unchanged'\n }\n }); // useful for monitoring QoS\n\n this.tech_.trigger('playliststuck');\n }\n });\n this.mainPlaylistLoader_.on('renditiondisabled', () => {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-rendition-disabled'\n });\n });\n this.mainPlaylistLoader_.on('renditionenabled', () => {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-rendition-enabled'\n });\n });\n const playlistLoaderEvents = ['manifestrequeststart', 'manifestrequestcomplete', 'manifestparsestart', 'manifestparsecomplete', 'playlistrequeststart', 'playlistrequestcomplete', 'playlistparsestart', 'playlistparsecomplete', 'renditiondisabled', 'renditionenabled'];\n playlistLoaderEvents.forEach(eventName => {\n this.mainPlaylistLoader_.on(eventName, metadata => {\n // trigger directly on the player to ensure early events are fired.\n this.player_.trigger(_extends({}, metadata));\n });\n });\n }\n /**\n * Given an updated media playlist (whether it was loaded for the first time, or\n * refreshed for live playlists), update any relevant properties and state to reflect\n * changes in the media that should be accounted for (e.g., cues and duration).\n *\n * @param {Object} updatedPlaylist the updated media playlist object\n *\n * @private\n */\n\n handleUpdatedMediaPlaylist(updatedPlaylist) {\n if (this.useCueTags_) {\n this.updateAdCues_(updatedPlaylist);\n } // TODO: Create a new event on the PlaylistLoader that signals\n // that the segments have changed in some way and use that to\n // update the SegmentLoader instead of doing it twice here and\n // on `mediachange`\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.playlist(updatedPlaylist, this.requestOptions_);\n if (this.waitingForFastQualityPlaylistReceived_) {\n this.runFastQualitySwitch_();\n }\n this.updateDuration(!updatedPlaylist.endList); // If the player isn't paused, ensure that the segment loader is running,\n // as it is possible that it was temporarily stopped while waiting for\n // a playlist (e.g., in case the playlist errored and we re-requested it).\n\n if (!this.tech_.paused()) {\n this.mainSegmentLoader_.load();\n if (this.audioSegmentLoader_) {\n this.audioSegmentLoader_.load();\n }\n }\n }\n /**\n * A helper function for triggering presence usage events once per source\n *\n * @private\n */\n\n triggerPresenceUsage_(main, media) {\n const mediaGroups = main.mediaGroups || {};\n let defaultDemuxed = true;\n const audioGroupKeys = Object.keys(mediaGroups.AUDIO);\n for (const mediaGroup in mediaGroups.AUDIO) {\n for (const label in mediaGroups.AUDIO[mediaGroup]) {\n const properties = mediaGroups.AUDIO[mediaGroup][label];\n if (!properties.uri) {\n defaultDemuxed = false;\n }\n }\n }\n if (defaultDemuxed) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-demuxed'\n });\n }\n if (Object.keys(mediaGroups.SUBTITLES).length) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-webvtt'\n });\n }\n if (Vhs$1.Playlist.isAes(media)) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-aes'\n });\n }\n if (audioGroupKeys.length && Object.keys(mediaGroups.AUDIO[audioGroupKeys[0]]).length > 1) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-alternate-audio'\n });\n }\n if (this.useCueTags_) {\n this.tech_.trigger({\n type: 'usage',\n name: 
'vhs-playlist-cue-tags'\n });\n }\n }\n shouldSwitchToMedia_(nextPlaylist) {\n const currentPlaylist = this.mainPlaylistLoader_.media() || this.mainPlaylistLoader_.pendingMedia_;\n const currentTime = this.tech_.currentTime();\n const bufferLowWaterLine = this.bufferLowWaterLine();\n const bufferHighWaterLine = this.bufferHighWaterLine();\n const buffered = this.tech_.buffered();\n return shouldSwitchToMedia({\n buffered,\n currentTime,\n currentPlaylist,\n nextPlaylist,\n bufferLowWaterLine,\n bufferHighWaterLine,\n duration: this.duration(),\n bufferBasedABR: this.bufferBasedABR,\n log: this.logger_\n });\n }\n /**\n * Register event handlers on the segment loaders. A helper function\n * for construction time.\n *\n * @private\n */\n\n setupSegmentLoaderListeners_() {\n this.mainSegmentLoader_.on('bandwidthupdate', () => {\n // Whether or not buffer based ABR or another ABR is used, on a bandwidth change it's\n // useful to check to see if a rendition switch should be made.\n this.checkABR_('bandwidthupdate');\n this.tech_.trigger('bandwidthupdate');\n });\n this.mainSegmentLoader_.on('timeout', () => {\n if (this.bufferBasedABR) {\n // If a rendition change is needed, then it would've been done on `bandwidthupdate`.\n // Here the only consideration is that for buffer based ABR there's no guarantee\n // of an immediate switch (since the bandwidth is averaged with a timeout\n // bandwidth value of 1), so force a load on the segment loader to keep it going.\n this.mainSegmentLoader_.load();\n }\n }); // `progress` events are not reliable enough of a bandwidth measure to trigger buffer\n // based ABR.\n\n if (!this.bufferBasedABR) {\n this.mainSegmentLoader_.on('progress', () => {\n this.trigger('progress');\n });\n }\n this.mainSegmentLoader_.on('error', () => {\n const error = this.mainSegmentLoader_.error();\n this.excludePlaylist({\n playlistToExclude: error.playlist,\n error\n });\n });\n this.mainSegmentLoader_.on('appenderror', () => {\n this.error = this.mainSegmentLoader_.error_;\n this.trigger('error');\n });\n this.mainSegmentLoader_.on('syncinfoupdate', () => {\n this.onSyncInfoUpdate_();\n });\n this.mainSegmentLoader_.on('timestampoffset', () => {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-timestamp-offset'\n });\n });\n this.audioSegmentLoader_.on('syncinfoupdate', () => {\n this.onSyncInfoUpdate_();\n });\n this.audioSegmentLoader_.on('appenderror', () => {\n this.error = this.audioSegmentLoader_.error_;\n this.trigger('error');\n });\n this.mainSegmentLoader_.on('ended', () => {\n this.logger_('main segment loader ended');\n this.onEndOfStream();\n });\n this.mainSegmentLoader_.on('earlyabort', event => {\n // never try to early abort with the new ABR algorithm\n if (this.bufferBasedABR) {\n return;\n }\n this.delegateLoaders_('all', ['abort']);\n this.excludePlaylist({\n error: {\n message: 'Aborted early because there isn\\'t enough bandwidth to complete ' + 'the request without rebuffering.'\n },\n playlistExclusionDuration: ABORT_EARLY_EXCLUSION_SECONDS\n });\n });\n const updateCodecs = () => {\n if (!this.sourceUpdater_.hasCreatedSourceBuffers()) {\n return this.tryToCreateSourceBuffers_();\n }\n const codecs = this.getCodecsOrExclude_(); // no codecs means that the playlist was excluded\n\n if (!codecs) {\n return;\n }\n this.sourceUpdater_.addOrChangeSourceBuffers(codecs);\n };\n this.mainSegmentLoader_.on('trackinfo', updateCodecs);\n this.audioSegmentLoader_.on('trackinfo', updateCodecs);\n this.mainSegmentLoader_.on('fmp4', () => {\n if 
(!this.triggeredFmp4Usage) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-fmp4'\n });\n this.triggeredFmp4Usage = true;\n }\n });\n this.audioSegmentLoader_.on('fmp4', () => {\n if (!this.triggeredFmp4Usage) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-fmp4'\n });\n this.triggeredFmp4Usage = true;\n }\n });\n this.audioSegmentLoader_.on('ended', () => {\n this.logger_('audioSegmentLoader ended');\n this.onEndOfStream();\n });\n const segmentLoaderEvents = ['segmentselected', 'segmentloadstart', 'segmentloaded', 'segmentkeyloadstart', 'segmentkeyloadcomplete', 'segmentdecryptionstart', 'segmentdecryptioncomplete', 'segmenttransmuxingstart', 'segmenttransmuxingcomplete', 'segmenttransmuxingtrackinfoavailable', 'segmenttransmuxingtiminginfoavailable', 'segmentappendstart', 'appendsdone', 'bandwidthupdated', 'timelinechange', 'codecschange'];\n segmentLoaderEvents.forEach(eventName => {\n this.mainSegmentLoader_.on(eventName, metadata => {\n this.player_.trigger(_extends({}, metadata));\n });\n this.audioSegmentLoader_.on(eventName, metadata => {\n this.player_.trigger(_extends({}, metadata));\n });\n this.subtitleSegmentLoader_.on(eventName, metadata => {\n this.player_.trigger(_extends({}, metadata));\n });\n });\n }\n mediaSecondsLoaded_() {\n return Math.max(this.audioSegmentLoader_.mediaSecondsLoaded + this.mainSegmentLoader_.mediaSecondsLoaded);\n }\n /**\n * Call load on our SegmentLoaders\n */\n\n load() {\n this.mainSegmentLoader_.load();\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n this.audioSegmentLoader_.load();\n }\n if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {\n this.subtitleSegmentLoader_.load();\n }\n }\n /**\n * Re-tune playback quality level for the current player\n * conditions. This method will perform destructive actions like removing\n * already buffered content in order to readjust the currently active\n * playlist quickly. 
This is good for manual quality changes\n *\n * @private\n */\n\n fastQualityChange_(media = this.selectPlaylist()) {\n if (media && media === this.mainPlaylistLoader_.media()) {\n this.logger_('skipping fastQualityChange because new media is same as old');\n return;\n }\n this.switchMedia_(media, 'fast-quality'); // we would like to avoid the race condition where we call fastQuality,\n // reset everything and start loading segments from the previous playlist instead of the new one because the new playlist has not been received yet\n\n this.waitingForFastQualityPlaylistReceived_ = true;\n }\n runFastQualitySwitch_() {\n this.waitingForFastQualityPlaylistReceived_ = false; // Delete all buffered data to allow an immediate quality switch.\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.resetEverything(() => {\n this.mainSegmentLoader_.load();\n }); // don't need to reset audio as it is reset when media changes\n }\n /**\n * Begin playback.\n */\n\n play() {\n if (this.setupFirstPlay()) {\n return;\n }\n if (this.tech_.ended()) {\n this.tech_.setCurrentTime(0);\n }\n if (this.hasPlayed_) {\n this.load();\n }\n const seekable = this.tech_.seekable(); // if the viewer has paused and we fell out of the live window,\n // seek forward to the live point\n\n if (this.tech_.duration() === Infinity) {\n if (this.tech_.currentTime() < seekable.start(0)) {\n return this.tech_.setCurrentTime(seekable.end(seekable.length - 1));\n }\n }\n }\n /**\n * Seek to the latest media position if this is a live video and the\n * player and video are loaded and initialized.\n */\n\n setupFirstPlay() {\n const media = this.mainPlaylistLoader_.media(); // Check that everything is ready to begin buffering for the first call to play\n // If 1) there is no active media\n // 2) the player is paused\n // 3) the first play has already been set up\n // then exit early\n\n if (!media || this.tech_.paused() || this.hasPlayed_) {\n return false;\n } // when the video is a live stream and/or has a start time\n\n if (!media.endList || media.start) {\n const seekable = this.seekable();\n if (!seekable.length) {\n // without a seekable range, the player cannot seek to begin buffering at the\n // live or start point\n return false;\n }\n const seekableEnd = seekable.end(0);\n let startPoint = seekableEnd;\n if (media.start) {\n const offset = media.start.timeOffset;\n if (offset < 0) {\n startPoint = Math.max(seekableEnd + offset, seekable.start(0));\n } else {\n startPoint = Math.min(seekableEnd, offset);\n }\n } // trigger firstplay to inform the source handler to ignore the next seek event\n\n this.trigger('firstplay'); // seek to the live point\n\n this.tech_.setCurrentTime(startPoint);\n }\n this.hasPlayed_ = true; // we can begin loading now that everything is ready\n\n this.load();\n return true;\n }\n /**\n * handle the sourceopen event on the MediaSource\n *\n * @private\n */\n\n handleSourceOpen_() {\n // Only attempt to create the source buffer if none already exist.\n // handleSourceOpen is also called when we are \"re-opening\" a source buffer\n // after `endOfStream` has been called (in response to a seek for instance)\n this.tryToCreateSourceBuffers_(); // if autoplay is enabled, begin playback. 
This is duplicative of\n // code in video.js but is required because play() must be invoked\n // *after* the media source has opened.\n\n if (this.tech_.autoplay()) {\n const playPromise = this.tech_.play(); // Catch/silence error when a pause interrupts a play request\n // on browsers which return a promise\n\n if (typeof playPromise !== 'undefined' && typeof playPromise.then === 'function') {\n playPromise.then(null, e => {});\n }\n }\n this.trigger('sourceopen');\n }\n /**\n * handle the sourceended event on the MediaSource\n *\n * @private\n */\n\n handleSourceEnded_() {\n if (!this.inbandTextTracks_.metadataTrack_) {\n return;\n }\n const cues = this.inbandTextTracks_.metadataTrack_.cues;\n if (!cues || !cues.length) {\n return;\n }\n const duration = this.duration();\n cues[cues.length - 1].endTime = isNaN(duration) || Math.abs(duration) === Infinity ? Number.MAX_VALUE : duration;\n }\n /**\n * handle the durationchange event on the MediaSource\n *\n * @private\n */\n\n handleDurationChange_() {\n this.tech_.trigger('durationchange');\n }\n /**\n * Calls endOfStream on the media source when all active stream types have called\n * endOfStream\n *\n * @private\n */\n\n onEndOfStream() {\n let isEndOfStream = this.mainSegmentLoader_.ended_;\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n const mainMediaInfo = this.mainSegmentLoader_.getCurrentMediaInfo_(); // if the audio playlist loader exists, then alternate audio is active\n\n if (!mainMediaInfo || mainMediaInfo.hasVideo) {\n // if we do not know if the main segment loader contains video yet or if we\n // definitively know the main segment loader contains video, then we need to wait\n // for both main and audio segment loaders to call endOfStream\n isEndOfStream = isEndOfStream && this.audioSegmentLoader_.ended_;\n } else {\n // otherwise just rely on the audio loader\n isEndOfStream = this.audioSegmentLoader_.ended_;\n }\n }\n if (!isEndOfStream) {\n return;\n }\n this.stopABRTimer_();\n this.sourceUpdater_.endOfStream();\n }\n /**\n * Check if a playlist has stopped being updated\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist has stopped being updated or not\n */\n\n stuckAtPlaylistEnd_(playlist) {\n const seekable = this.seekable();\n if (!seekable.length) {\n // playlist doesn't have enough information to determine whether we are stuck\n return false;\n }\n const expired = this.syncController_.getExpiredTime(playlist, this.duration());\n if (expired === null) {\n return false;\n } // does not use the safe live end to calculate playlist end, since we\n // don't want to say we are stuck while there is still content\n\n const absolutePlaylistEnd = Vhs$1.Playlist.playlistEnd(playlist, expired);\n const currentTime = this.tech_.currentTime();\n const buffered = this.tech_.buffered();\n if (!buffered.length) {\n // return true if the playhead reached the absolute end of the playlist\n return absolutePlaylistEnd - currentTime <= SAFE_TIME_DELTA;\n }\n const bufferedEnd = buffered.end(buffered.length - 1); // return true if there is too little buffer left and buffer has reached absolute\n // end of playlist\n\n return bufferedEnd - currentTime <= SAFE_TIME_DELTA && absolutePlaylistEnd - bufferedEnd <= SAFE_TIME_DELTA;\n }\n /**\n * Exclude a playlist for a set amount of time, making it unavailable for selection by\n * the rendition selection algorithm, then force a new playlist 
(rendition) selection.\n *\n * @param {Object=} playlistToExclude\n * the playlist to exclude, defaults to the currently selected playlist\n * @param {Object=} error\n * an optional error\n * @param {number=} playlistExclusionDuration\n * an optional number of seconds to exclude the playlist\n */\n\n excludePlaylist({\n playlistToExclude = this.mainPlaylistLoader_.media(),\n error = {},\n playlistExclusionDuration\n }) {\n // If the `error` was generated by the playlist loader, it will contain\n // the playlist we were trying to load (but failed) and that should be\n // excluded instead of the currently selected playlist which is likely\n // out-of-date in this scenario\n playlistToExclude = playlistToExclude || this.mainPlaylistLoader_.media();\n playlistExclusionDuration = playlistExclusionDuration || error.playlistExclusionDuration || this.playlistExclusionDuration; // If there is no current playlist, then an error occurred while we were\n // trying to load the main OR while we were disposing of the tech\n\n if (!playlistToExclude) {\n this.error = error;\n if (this.mediaSource.readyState !== 'open') {\n this.trigger('error');\n } else {\n this.sourceUpdater_.endOfStream('network');\n }\n return;\n }\n playlistToExclude.playlistErrors_++;\n const playlists = this.mainPlaylistLoader_.main.playlists;\n const enabledPlaylists = playlists.filter(isEnabled);\n const isFinalRendition = enabledPlaylists.length === 1 && enabledPlaylists[0] === playlistToExclude; // Don't exclude the only playlist unless it was excluded\n // forever\n\n if (playlists.length === 1 && playlistExclusionDuration !== Infinity) {\n videojs.log.warn(`Problem encountered with playlist ${playlistToExclude.id}. ` + 'Trying again since it is the only playlist.');\n this.tech_.trigger('retryplaylist'); // if this is a final rendition, we should delay\n\n return this.mainPlaylistLoader_.load(isFinalRendition);\n }\n if (isFinalRendition) {\n // If we're content steering, try other pathways.\n if (this.main().contentSteering) {\n const pathway = this.pathwayAttribute_(playlistToExclude); // Ignore at least 1 steering manifest refresh.\n\n const reIncludeDelay = this.contentSteeringController_.steeringManifest.ttl * 1000;\n this.contentSteeringController_.excludePathway(pathway);\n this.excludeThenChangePathway_();\n setTimeout(() => {\n this.contentSteeringController_.addAvailablePathway(pathway);\n }, reIncludeDelay);\n return;\n } // Since we're on the final non-excluded playlist, and we're about to exclude\n // it, instead of erring the player or retrying this playlist, clear out the current\n // exclusion list. This allows other playlists to be attempted in case any have been\n // fixed.\n\n let reincluded = false;\n playlists.forEach(playlist => {\n // skip current playlist which is about to be excluded\n if (playlist === playlistToExclude) {\n return;\n }\n const excludeUntil = playlist.excludeUntil; // a playlist cannot be reincluded if it wasn't excluded to begin with.\n\n if (typeof excludeUntil !== 'undefined' && excludeUntil !== Infinity) {\n reincluded = true;\n delete playlist.excludeUntil;\n }\n });\n if (reincluded) {\n videojs.log.warn('Removing other playlists from the exclusion list because the last ' + 'rendition is about to be excluded.'); // Technically we are retrying a playlist, in that we are simply retrying a previous\n // playlist. 
This is needed for users relying on the retryplaylist event to catch a\n // case where the player might be stuck and looping through \"dead\" playlists.\n\n this.tech_.trigger('retryplaylist');\n }\n } // Exclude this playlist\n\n let excludeUntil;\n if (playlistToExclude.playlistErrors_ > this.maxPlaylistRetries) {\n excludeUntil = Infinity;\n } else {\n excludeUntil = Date.now() + playlistExclusionDuration * 1000;\n }\n playlistToExclude.excludeUntil = excludeUntil;\n if (error.reason) {\n playlistToExclude.lastExcludeReason_ = error.reason;\n }\n this.tech_.trigger('excludeplaylist');\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-rendition-excluded'\n }); // TODO: only load a new playlist if we're excluding the current playlist\n // If this function was called with a playlist that's not the current active playlist\n // (e.g., media().id !== playlistToExclude.id),\n // then a new playlist should not be selected and loaded, as there's nothing wrong with the current playlist.\n\n const nextPlaylist = this.selectPlaylist();\n if (!nextPlaylist) {\n this.error = 'Playback cannot continue. No available working or supported playlists.';\n this.trigger('error');\n return;\n }\n const logFn = error.internal ? this.logger_ : videojs.log.warn;\n const errorMessage = error.message ? ' ' + error.message : '';\n logFn(`${error.internal ? 'Internal problem' : 'Problem'} encountered with playlist ${playlistToExclude.id}.` + `${errorMessage} Switching to playlist ${nextPlaylist.id}.`); // if audio group changed reset audio loaders\n\n if (nextPlaylist.attributes.AUDIO !== playlistToExclude.attributes.AUDIO) {\n this.delegateLoaders_('audio', ['abort', 'pause']);\n } // if subtitle group changed reset subtitle loaders\n\n if (nextPlaylist.attributes.SUBTITLES !== playlistToExclude.attributes.SUBTITLES) {\n this.delegateLoaders_('subtitle', ['abort', 'pause']);\n }\n this.delegateLoaders_('main', ['abort', 'pause']);\n const delayDuration = nextPlaylist.targetDuration / 2 * 1000 || 5 * 1000;\n const shouldDelay = typeof nextPlaylist.lastRequest === 'number' && Date.now() - nextPlaylist.lastRequest <= delayDuration; // delay if it's a final rendition or if the last refresh is sooner than half targetDuration\n\n return this.switchMedia_(nextPlaylist, 'exclude', isFinalRendition || shouldDelay);\n }\n /**\n * Pause all segment/playlist loaders\n */\n\n pauseLoading() {\n this.delegateLoaders_('all', ['abort', 'pause']);\n this.stopABRTimer_();\n }\n /**\n * Call a set of functions in order on playlist loaders, segment loaders,\n * or both types of loaders.\n *\n * @param {string} filter\n * Filter loaders that should call fnNames using a string. 
Can be:\n * * all - run on all loaders\n * * audio - run on all audio loaders\n * * subtitle - run on all subtitle loaders\n * * main - run on the main loaders\n *\n * @param {Array|string} fnNames\n * A string or array of function names to call.\n */\n\n delegateLoaders_(filter, fnNames) {\n const loaders = [];\n const dontFilterPlaylist = filter === 'all';\n if (dontFilterPlaylist || filter === 'main') {\n loaders.push(this.mainPlaylistLoader_);\n }\n const mediaTypes = [];\n if (dontFilterPlaylist || filter === 'audio') {\n mediaTypes.push('AUDIO');\n }\n if (dontFilterPlaylist || filter === 'subtitle') {\n mediaTypes.push('CLOSED-CAPTIONS');\n mediaTypes.push('SUBTITLES');\n }\n mediaTypes.forEach(mediaType => {\n const loader = this.mediaTypes_[mediaType] && this.mediaTypes_[mediaType].activePlaylistLoader;\n if (loader) {\n loaders.push(loader);\n }\n });\n ['main', 'audio', 'subtitle'].forEach(name => {\n const loader = this[`${name}SegmentLoader_`];\n if (loader && (filter === name || filter === 'all')) {\n loaders.push(loader);\n }\n });\n loaders.forEach(loader => fnNames.forEach(fnName => {\n if (typeof loader[fnName] === 'function') {\n loader[fnName]();\n }\n }));\n }\n /**\n * set the current time on all segment loaders\n *\n * @param {number} currentTime the current time to set\n * @return {number} the current time\n */\n\n setCurrentTime(currentTime) {\n const buffered = findRange(this.tech_.buffered(), currentTime);\n if (!(this.mainPlaylistLoader_ && this.mainPlaylistLoader_.media())) {\n // return immediately if the metadata is not ready yet\n return 0;\n } // it's clearly an edge case, but don't throw an error if asked to\n // seek within an empty playlist\n\n if (!this.mainPlaylistLoader_.media().segments) {\n return 0;\n } // if the seek location is already buffered, continue buffering as usual\n\n if (buffered && buffered.length) {\n return currentTime;\n } // cancel outstanding requests so we begin buffering at the new\n // location\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.resetEverything();\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n this.audioSegmentLoader_.pause();\n this.audioSegmentLoader_.resetEverything();\n }\n if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {\n this.subtitleSegmentLoader_.pause();\n this.subtitleSegmentLoader_.resetEverything();\n } // start segment loader loading in case they are paused\n\n this.load();\n }\n /**\n * get the current duration\n *\n * @return {number} the duration\n */\n\n duration() {\n if (!this.mainPlaylistLoader_) {\n return 0;\n }\n const media = this.mainPlaylistLoader_.media();\n if (!media) {\n // no playlists loaded yet, so can't determine a duration\n return 0;\n } // Don't rely on the media source for duration in the case of a live playlist since\n // setting the native MediaSource's duration to infinity ends up with consequences to\n // seekable behavior. See https://github.com/w3c/media-source/issues/5 for details.\n //\n // This is resolved in the spec by https://github.com/w3c/media-source/pull/92,\n // however, few browsers have support for setLiveSeekableRange()\n // https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/setLiveSeekableRange\n //\n // Until a time when the duration of the media source can be set to infinity, and a\n // seekable range specified across browsers, just return Infinity.\n\n if (!media.endList) {\n return Infinity;\n } // Since this is a VOD video, it is safe to rely on the media source's duration (if\n // available). 
If it's not available, fall back to a playlist-calculated estimate.\n\n if (this.mediaSource) {\n return this.mediaSource.duration;\n }\n return Vhs$1.Playlist.duration(media);\n }\n /**\n * check the seekable range\n *\n * @return {TimeRange} the seekable range\n */\n\n seekable() {\n return this.seekable_;\n }\n onSyncInfoUpdate_() {\n let audioSeekable; // TODO check for creation of both source buffers before updating seekable\n //\n // A fix was made to this function where a check for\n // this.sourceUpdater_.hasCreatedSourceBuffers\n // was added to ensure that both source buffers were created before seekable was\n // updated. However, it originally had a bug where it was checking for a true and\n // returning early instead of checking for false. Setting it to check for false to\n // return early though created other issues. A call to play() would check for seekable\n // end without verifying that a seekable range was present. In addition, even checking\n // for that didn't solve some issues, as handleFirstPlay is sometimes worked around\n // due to a media update calling load on the segment loaders, skipping a seek to live,\n // thereby starting live streams at the beginning of the stream rather than at the end.\n //\n // This conditional should be fixed to wait for the creation of two source buffers at\n // the same time as the other sections of code are fixed to properly seek to live and\n // not throw an error due to checking for a seekable end when no seekable range exists.\n //\n // For now, fall back to the older behavior, with the understanding that the seekable\n // range may not be completely correct, leading to a suboptimal initial live point.\n\n if (!this.mainPlaylistLoader_) {\n return;\n }\n let media = this.mainPlaylistLoader_.media();\n if (!media) {\n return;\n }\n let expired = this.syncController_.getExpiredTime(media, this.duration());\n if (expired === null) {\n // not enough information to update seekable\n return;\n }\n const main = this.mainPlaylistLoader_.main;\n const mainSeekable = Vhs$1.Playlist.seekable(media, expired, Vhs$1.Playlist.liveEdgeDelay(main, media));\n if (mainSeekable.length === 0) {\n return;\n }\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n media = this.mediaTypes_.AUDIO.activePlaylistLoader.media();\n expired = this.syncController_.getExpiredTime(media, this.duration());\n if (expired === null) {\n return;\n }\n audioSeekable = Vhs$1.Playlist.seekable(media, expired, Vhs$1.Playlist.liveEdgeDelay(main, media));\n if (audioSeekable.length === 0) {\n return;\n }\n }\n let oldEnd;\n let oldStart;\n if (this.seekable_ && this.seekable_.length) {\n oldEnd = this.seekable_.end(0);\n oldStart = this.seekable_.start(0);\n }\n if (!audioSeekable) {\n // seekable has been calculated based on buffering video data so it\n // can be returned directly\n this.seekable_ = mainSeekable;\n } else if (audioSeekable.start(0) > mainSeekable.end(0) || mainSeekable.start(0) > audioSeekable.end(0)) {\n // seekables are pretty far off, rely on main\n this.seekable_ = mainSeekable;\n } else {\n this.seekable_ = createTimeRanges([[audioSeekable.start(0) > mainSeekable.start(0) ? audioSeekable.start(0) : mainSeekable.start(0), audioSeekable.end(0) < mainSeekable.end(0) ? 
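/* Take the intersection of the two ranges: the later of the two starts and the earlier of the two ends. Worked example (illustrative numbers, not from the source): main seekable [10, 60] and audio seekable [20, 70] intersect to [20, 60], so every advertised position is seekable in both buffers. */ 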
audioSeekable.end(0) : mainSeekable.end(0)]]);\n } // seekable is the same as last time\n\n if (this.seekable_ && this.seekable_.length) {\n if (this.seekable_.end(0) === oldEnd && this.seekable_.start(0) === oldStart) {\n return;\n }\n }\n this.logger_(`seekable updated [${printableRange(this.seekable_)}]`);\n const metadata = {\n seekableRanges: this.seekable_\n };\n this.trigger({\n type: 'seekablerangeschanged',\n metadata\n });\n this.tech_.trigger('seekablechanged');\n }\n /**\n * Update the player duration\n */\n\n updateDuration(isLive) {\n if (this.updateDuration_) {\n this.mediaSource.removeEventListener('sourceopen', this.updateDuration_);\n this.updateDuration_ = null;\n }\n if (this.mediaSource.readyState !== 'open') {\n this.updateDuration_ = this.updateDuration.bind(this, isLive);\n this.mediaSource.addEventListener('sourceopen', this.updateDuration_);\n return;\n }\n if (isLive) {\n const seekable = this.seekable();\n if (!seekable.length) {\n return;\n } // Even in the case of a live playlist, the native MediaSource's duration should not\n // be set to Infinity (even though this would be expected for a live playlist), since\n // setting the native MediaSource's duration to infinity ends up with consequences to\n // seekable behavior. See https://github.com/w3c/media-source/issues/5 for details.\n //\n // This is resolved in the spec by https://github.com/w3c/media-source/pull/92,\n // however, few browsers have support for setLiveSeekableRange()\n // https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/setLiveSeekableRange\n //\n // Until a time when the duration of the media source can be set to infinity, and a\n // seekable range specified across browsers, the duration should be greater than or\n // equal to the last possible seekable value.\n // MediaSource duration starts as NaN\n // It is possible (and probable) that this case will never be reached for many\n // sources, since the MediaSource reports duration as the highest value without\n // accounting for timestamp offset. For example, if the timestamp offset is -100 and\n // we buffered times 0 to 100 with real times of 100 to 200, even though current\n // time will be between 0 and 100, the native media source may report the duration\n // as 200. However, since we report duration separate from the media source (as\n // Infinity), and as long as the native media source duration value is greater than\n // our reported seekable range, seeks will work as expected. 
The large number as\n // duration for live is actually a strategy used by some players to work around the\n // issue of live seekable ranges cited above.\n\n if (isNaN(this.mediaSource.duration) || this.mediaSource.duration < seekable.end(seekable.length - 1)) {\n this.sourceUpdater_.setDuration(seekable.end(seekable.length - 1));\n }\n return;\n }\n const buffered = this.tech_.buffered();\n let duration = Vhs$1.Playlist.duration(this.mainPlaylistLoader_.media());\n if (buffered.length > 0) {\n duration = Math.max(duration, buffered.end(buffered.length - 1));\n }\n if (this.mediaSource.duration !== duration) {\n this.sourceUpdater_.setDuration(duration);\n }\n }\n /**\n * dispose of the PlaylistController and everything\n * that it controls\n */\n\n dispose() {\n this.trigger('dispose');\n this.decrypter_.terminate();\n this.mainPlaylistLoader_.dispose();\n this.mainSegmentLoader_.dispose();\n this.contentSteeringController_.dispose();\n this.keyStatusMap_.clear();\n if (this.loadOnPlay_) {\n this.tech_.off('play', this.loadOnPlay_);\n }\n ['AUDIO', 'SUBTITLES'].forEach(type => {\n const groups = this.mediaTypes_[type].groups;\n for (const id in groups) {\n groups[id].forEach(group => {\n if (group.playlistLoader) {\n group.playlistLoader.dispose();\n }\n });\n }\n });\n this.audioSegmentLoader_.dispose();\n this.subtitleSegmentLoader_.dispose();\n this.sourceUpdater_.dispose();\n this.timelineChangeController_.dispose();\n this.stopABRTimer_();\n if (this.updateDuration_) {\n this.mediaSource.removeEventListener('sourceopen', this.updateDuration_);\n }\n this.mediaSource.removeEventListener('durationchange', this.handleDurationChange_); // load the media source into the player\n\n this.mediaSource.removeEventListener('sourceopen', this.handleSourceOpen_);\n this.mediaSource.removeEventListener('sourceended', this.handleSourceEnded_);\n this.off();\n }\n /**\n * return the main playlist object if we have one\n *\n * @return {Object} the main playlist object that we parsed\n */\n\n main() {\n return this.mainPlaylistLoader_.main;\n }\n /**\n * return the currently selected playlist\n *\n * @return {Object} the currently selected playlist object that we parsed\n */\n\n media() {\n // playlist loader will not return media if it has not been fully loaded\n return this.mainPlaylistLoader_.media() || this.initialMedia_;\n }\n areMediaTypesKnown_() {\n const usingAudioLoader = !!this.mediaTypes_.AUDIO.activePlaylistLoader;\n const hasMainMediaInfo = !!this.mainSegmentLoader_.getCurrentMediaInfo_(); // if we are not using an audio loader, then we have audio media info\n // otherwise check on the segment loader.\n\n const hasAudioMediaInfo = !usingAudioLoader ? 
true : !!this.audioSegmentLoader_.getCurrentMediaInfo_(); // one or both loaders have not loaded sufficiently to get codecs\n\n if (!hasMainMediaInfo || !hasAudioMediaInfo) {\n return false;\n }\n return true;\n } // find from and to for codec switch event\n\n getCodecsOrExclude_() {\n const media = {\n main: this.mainSegmentLoader_.getCurrentMediaInfo_() || {},\n audio: this.audioSegmentLoader_.getCurrentMediaInfo_() || {}\n };\n const playlist = this.mainSegmentLoader_.getPendingSegmentPlaylist() || this.media(); // set \"main\" media equal to video\n\n media.video = media.main;\n const playlistCodecs = codecsForPlaylist(this.main(), playlist);\n const codecs = {};\n const usingAudioLoader = !!this.mediaTypes_.AUDIO.activePlaylistLoader;\n if (media.main.hasVideo) {\n codecs.video = playlistCodecs.video || media.main.videoCodec || DEFAULT_VIDEO_CODEC;\n }\n if (media.main.isMuxed) {\n codecs.video += `,${playlistCodecs.audio || media.main.audioCodec || DEFAULT_AUDIO_CODEC}`;\n }\n if (media.main.hasAudio && !media.main.isMuxed || media.audio.hasAudio || usingAudioLoader) {\n codecs.audio = playlistCodecs.audio || media.main.audioCodec || media.audio.audioCodec || DEFAULT_AUDIO_CODEC; // set audio isFmp4 so we use the correct \"supports\" function below\n\n media.audio.isFmp4 = media.main.hasAudio && !media.main.isMuxed ? media.main.isFmp4 : media.audio.isFmp4;\n } // no codecs, no playback.\n\n if (!codecs.audio && !codecs.video) {\n this.excludePlaylist({\n playlistToExclude: playlist,\n error: {\n message: 'Could not determine codecs for playlist.'\n },\n playlistExclusionDuration: Infinity\n });\n return;\n } // fmp4 relies on browser support, while ts relies on muxer support\n\n const supportFunction = (isFmp4, codec) => isFmp4 ? browserSupportsCodec(codec) : muxerSupportsCodec(codec);\n const unsupportedCodecs = {};\n let unsupportedAudio;\n ['video', 'audio'].forEach(function (type) {\n if (codecs.hasOwnProperty(type) && !supportFunction(media[type].isFmp4, codecs[type])) {\n const supporter = media[type].isFmp4 ? 
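/* fmp4 segments are appended to the SourceBuffer as-is, so support depends on the browser; ts segments are rewritten by the transmuxer first, so support depends on the muxer. A codec can pass one check and fail the other (hypothetical example: an HEVC rendition a browser can decode from fmp4 but the transmuxer cannot repackage from ts). */ 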
'browser' : 'muxer';\n unsupportedCodecs[supporter] = unsupportedCodecs[supporter] || [];\n unsupportedCodecs[supporter].push(codecs[type]);\n if (type === 'audio') {\n unsupportedAudio = supporter;\n }\n }\n });\n if (usingAudioLoader && unsupportedAudio && playlist.attributes.AUDIO) {\n const audioGroup = playlist.attributes.AUDIO;\n this.main().playlists.forEach(variant => {\n const variantAudioGroup = variant.attributes && variant.attributes.AUDIO;\n if (variantAudioGroup === audioGroup && variant !== playlist) {\n variant.excludeUntil = Infinity;\n }\n });\n this.logger_(`excluding audio group ${audioGroup} as ${unsupportedAudio} does not support codec(s): \"${codecs.audio}\"`);\n } // if we have any unsupported codecs exclude this playlist.\n\n if (Object.keys(unsupportedCodecs).length) {\n const message = Object.keys(unsupportedCodecs).reduce((acc, supporter) => {\n if (acc) {\n acc += ', ';\n }\n acc += `${supporter} does not support codec(s): \"${unsupportedCodecs[supporter].join(',')}\"`;\n return acc;\n }, '') + '.';\n this.excludePlaylist({\n playlistToExclude: playlist,\n error: {\n internal: true,\n message\n },\n playlistExclusionDuration: Infinity\n });\n return;\n } // check if codec switching is happening\n\n if (this.sourceUpdater_.hasCreatedSourceBuffers() && !this.sourceUpdater_.canChangeType()) {\n const switchMessages = [];\n ['video', 'audio'].forEach(type => {\n const newCodec = (parseCodecs(this.sourceUpdater_.codecs[type] || '')[0] || {}).type;\n const oldCodec = (parseCodecs(codecs[type] || '')[0] || {}).type;\n if (newCodec && oldCodec && newCodec.toLowerCase() !== oldCodec.toLowerCase()) {\n switchMessages.push(`\"${this.sourceUpdater_.codecs[type]}\" -> \"${codecs[type]}\"`);\n }\n });\n if (switchMessages.length) {\n this.excludePlaylist({\n playlistToExclude: playlist,\n error: {\n message: `Codec switching not supported: ${switchMessages.join(', ')}.`,\n internal: true\n },\n playlistExclusionDuration: Infinity\n });\n return;\n }\n } // TODO: when using the muxer shouldn't we just return\n // the codecs that the muxer outputs?\n\n return codecs;\n }\n /**\n * Create source buffers and exclude any incompatible renditions.\n *\n * @private\n */\n\n tryToCreateSourceBuffers_() {\n // media source is not ready yet or sourceBuffers are already\n // created.\n if (this.mediaSource.readyState !== 'open' || this.sourceUpdater_.hasCreatedSourceBuffers()) {\n return;\n }\n if (!this.areMediaTypesKnown_()) {\n return;\n }\n const codecs = this.getCodecsOrExclude_(); // no codecs means that the playlist was excluded\n\n if (!codecs) {\n return;\n }\n this.sourceUpdater_.createSourceBuffers(codecs);\n const codecString = [codecs.video, codecs.audio].filter(Boolean).join(',');\n this.excludeIncompatibleVariants_(codecString);\n }\n /**\n * Excludes playlists with codecs that are unsupported by the muxer and browser.\n */\n\n excludeUnsupportedVariants_() {\n const playlists = this.main().playlists;\n const ids = []; // TODO: why don't we have a property to loop through all\n // playlists? 
Why did we ever mix indexes and keys?\n\n Object.keys(playlists).forEach(key => {\n const variant = playlists[key]; // check if we already processed this playlist.\n\n if (ids.indexOf(variant.id) !== -1) {\n return;\n }\n ids.push(variant.id);\n const codecs = codecsForPlaylist(this.main(), variant);\n const unsupported = [];\n if (codecs.audio && !muxerSupportsCodec(codecs.audio) && !browserSupportsCodec(codecs.audio)) {\n unsupported.push(`audio codec ${codecs.audio}`);\n }\n if (codecs.video && !muxerSupportsCodec(codecs.video) && !browserSupportsCodec(codecs.video)) {\n unsupported.push(`video codec ${codecs.video}`);\n }\n if (codecs.text && codecs.text === 'stpp.ttml.im1t') {\n unsupported.push(`text codec ${codecs.text}`);\n }\n if (unsupported.length) {\n variant.excludeUntil = Infinity;\n this.logger_(`excluding ${variant.id} for unsupported: ${unsupported.join(', ')}`);\n }\n });\n }\n /**\n * Exclude playlists that are known to be codec or\n * stream-incompatible with the SourceBuffer configuration. For\n * instance, Media Source Extensions would cause the video element to\n * stall waiting for video data if you switched from a variant with\n * video and audio to an audio-only one.\n *\n * @param {string} codecString a codec string describing the current\n * set of SourceBuffers. Variants in the current main playlist that\n * do not appear to have compatible codec or stream configurations\n * will be excluded from the default playlist selection algorithm\n * indefinitely.\n * @private\n */\n\n excludeIncompatibleVariants_(codecString) {\n const ids = [];\n const playlists = this.main().playlists;\n const codecs = unwrapCodecList(parseCodecs(codecString));\n const codecCount_ = codecCount(codecs);\n const videoDetails = codecs.video && parseCodecs(codecs.video)[0] || null;\n const audioDetails = codecs.audio && parseCodecs(codecs.audio)[0] || null;\n Object.keys(playlists).forEach(key => {\n const variant = playlists[key]; // check if we already processed this playlist\n // or if it is already excluded forever.\n\n if (ids.indexOf(variant.id) !== -1 || variant.excludeUntil === Infinity) {\n return;\n }\n ids.push(variant.id);\n const exclusionReasons = []; // get codecs from the playlist for this variant\n\n const variantCodecs = codecsForPlaylist(this.mainPlaylistLoader_.main, variant);\n const variantCodecCount = codecCount(variantCodecs); // if no codecs are listed, we cannot determine that this\n // variant is incompatible. 
Wait for mux.js to probe\n\n if (!variantCodecs.audio && !variantCodecs.video) {\n return;\n } // TODO: we can support this by removing the\n // old media source and creating a new one, but it will take some work.\n // The number of streams cannot change\n\n if (variantCodecCount !== codecCount_) {\n exclusionReasons.push(`codec count \"${variantCodecCount}\" !== \"${codecCount_}\"`);\n } // only exclude playlists by codec change, if codecs cannot switch\n // during playback.\n\n if (!this.sourceUpdater_.canChangeType()) {\n const variantVideoDetails = variantCodecs.video && parseCodecs(variantCodecs.video)[0] || null;\n const variantAudioDetails = variantCodecs.audio && parseCodecs(variantCodecs.audio)[0] || null; // the video codec cannot change\n\n if (variantVideoDetails && videoDetails && variantVideoDetails.type.toLowerCase() !== videoDetails.type.toLowerCase()) {\n exclusionReasons.push(`video codec \"${variantVideoDetails.type}\" !== \"${videoDetails.type}\"`);\n } // the audio codec cannot change\n\n if (variantAudioDetails && audioDetails && variantAudioDetails.type.toLowerCase() !== audioDetails.type.toLowerCase()) {\n exclusionReasons.push(`audio codec \"${variantAudioDetails.type}\" !== \"${audioDetails.type}\"`);\n }\n }\n if (exclusionReasons.length) {\n variant.excludeUntil = Infinity;\n this.logger_(`excluding ${variant.id}: ${exclusionReasons.join(' && ')}`);\n }\n });\n }\n updateAdCues_(media) {\n let offset = 0;\n const seekable = this.seekable();\n if (seekable.length) {\n offset = seekable.start(0);\n }\n updateAdCues(media, this.cueTagsTrack_, offset);\n }\n /**\n * Calculates the desired forward buffer length based on current time\n *\n * @return {number} Desired forward buffer length in seconds\n */\n\n goalBufferLength() {\n const currentTime = this.tech_.currentTime();\n const initial = Config.GOAL_BUFFER_LENGTH;\n const rate = Config.GOAL_BUFFER_LENGTH_RATE;\n const max = Math.max(initial, Config.MAX_GOAL_BUFFER_LENGTH);\n return Math.min(initial + currentTime * rate, max);\n }\n /**\n * Calculates the desired buffer low water line based on current time\n *\n * @return {number} Desired buffer low water line in seconds\n */\n\n bufferLowWaterLine() {\n const currentTime = this.tech_.currentTime();\n const initial = Config.BUFFER_LOW_WATER_LINE;\n const rate = Config.BUFFER_LOW_WATER_LINE_RATE;\n const max = Math.max(initial, Config.MAX_BUFFER_LOW_WATER_LINE);\n const newMax = Math.max(initial, Config.EXPERIMENTAL_MAX_BUFFER_LOW_WATER_LINE);\n return Math.min(initial + currentTime * rate, this.bufferBasedABR ? newMax : max);\n }\n bufferHighWaterLine() {\n return Config.BUFFER_HIGH_WATER_LINE;\n }\n addDateRangesToTextTrack_(dateRanges) {\n createMetadataTrackIfNotExists(this.inbandTextTracks_, 'com.apple.streaming', this.tech_);\n addDateRangeMetadata({\n inbandTextTracks: this.inbandTextTracks_,\n dateRanges\n });\n }\n addMetadataToTextTrack(dispatchType, metadataArray, videoDuration) {\n const timestampOffset = this.sourceUpdater_.videoBuffer ? 
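/* Prefer the video buffer's timestamp offset when one exists, falling back to the audio buffer's offset for audio-only streams; presumably this keeps metadata cue times aligned with the buffer the samples were appended to. */ 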
this.sourceUpdater_.videoTimestampOffset() : this.sourceUpdater_.audioTimestampOffset(); // There's potentially an issue where we could double add metadata if there's a muxed\n // audio/video source with a metadata track, and an alt audio with a metadata track.\n // However, this probably won't happen, and if it does it can be handled then.\n\n createMetadataTrackIfNotExists(this.inbandTextTracks_, dispatchType, this.tech_);\n addMetadata({\n inbandTextTracks: this.inbandTextTracks_,\n metadataArray,\n timestampOffset,\n videoDuration\n });\n }\n /**\n * Utility for getting the pathway or service location from an HLS or DASH playlist.\n *\n * @param {Object} playlist for getting pathway from.\n * @return the pathway attribute of a playlist\n */\n\n pathwayAttribute_(playlist) {\n return playlist.attributes['PATHWAY-ID'] || playlist.attributes.serviceLocation;\n }\n /**\n * Initialize available pathways and apply the tag properties.\n */\n\n initContentSteeringController_() {\n const main = this.main();\n if (!main.contentSteering) {\n return;\n }\n for (const playlist of main.playlists) {\n this.contentSteeringController_.addAvailablePathway(this.pathwayAttribute_(playlist));\n }\n this.contentSteeringController_.assignTagProperties(main.uri, main.contentSteering); // request the steering manifest immediately if queryBeforeStart is set.\n\n if (this.contentSteeringController_.queryBeforeStart) {\n // When queryBeforeStart is true, initial request should omit steering parameters.\n this.contentSteeringController_.requestSteeringManifest(true);\n return;\n } // otherwise start content steering after playback starts\n\n this.tech_.one('canplay', () => {\n this.contentSteeringController_.requestSteeringManifest();\n });\n }\n /**\n * Reset the content steering controller and re-init.\n */\n\n resetContentSteeringController_() {\n this.contentSteeringController_.clearAvailablePathways();\n this.contentSteeringController_.dispose();\n this.initContentSteeringController_();\n }\n /**\n * Attaches the listeners for content steering.\n */\n\n attachContentSteeringListeners_() {\n this.contentSteeringController_.on('content-steering', this.excludeThenChangePathway_.bind(this));\n const contentSteeringEvents = ['contentsteeringloadstart', 'contentsteeringloadcomplete', 'contentsteeringparsed'];\n contentSteeringEvents.forEach(eventName => {\n this.contentSteeringController_.on(eventName, metadata => {\n this.trigger(_extends({}, metadata));\n });\n });\n if (this.sourceType_ === 'dash') {\n this.mainPlaylistLoader_.on('loadedplaylist', () => {\n const main = this.main(); // check if steering tag or pathways changed.\n\n const didDashTagChange = this.contentSteeringController_.didDASHTagChange(main.uri, main.contentSteering);\n const didPathwaysChange = () => {\n const availablePathways = this.contentSteeringController_.getAvailablePathways();\n const newPathways = [];\n for (const playlist of main.playlists) {\n const serviceLocation = playlist.attributes.serviceLocation;\n if (serviceLocation) {\n newPathways.push(serviceLocation);\n if (!availablePathways.has(serviceLocation)) {\n return true;\n }\n }\n } // If we have no new serviceLocations and previously had availablePathways\n\n if (!newPathways.length && availablePathways.size) {\n return true;\n }\n return false;\n };\n if (didDashTagChange || didPathwaysChange()) {\n this.resetContentSteeringController_();\n }\n });\n }\n }\n /**\n * Simple exclude and change playlist logic for content steering.\n */\n\n excludeThenChangePathway_() {\n const 
currentPathway = this.contentSteeringController_.getPathway();\n if (!currentPathway) {\n return;\n }\n this.handlePathwayClones_();\n const main = this.main();\n const playlists = main.playlists;\n const ids = new Set();\n let didEnablePlaylists = false;\n Object.keys(playlists).forEach(key => {\n const variant = playlists[key];\n const pathwayId = this.pathwayAttribute_(variant);\n const differentPathwayId = pathwayId && currentPathway !== pathwayId;\n const steeringExclusion = variant.excludeUntil === Infinity && variant.lastExcludeReason_ === 'content-steering';\n if (steeringExclusion && !differentPathwayId) {\n delete variant.excludeUntil;\n delete variant.lastExcludeReason_;\n didEnablePlaylists = true;\n }\n const noExcludeUntil = !variant.excludeUntil && variant.excludeUntil !== Infinity;\n const shouldExclude = !ids.has(variant.id) && differentPathwayId && noExcludeUntil;\n if (!shouldExclude) {\n return;\n }\n ids.add(variant.id);\n variant.excludeUntil = Infinity;\n variant.lastExcludeReason_ = 'content-steering'; // TODO: kind of spammy, maybe move this.\n\n this.logger_(`excluding ${variant.id} for ${variant.lastExcludeReason_}`);\n });\n if (this.contentSteeringController_.manifestType_ === 'DASH') {\n Object.keys(this.mediaTypes_).forEach(key => {\n const type = this.mediaTypes_[key];\n if (type.activePlaylistLoader) {\n const currentPlaylist = type.activePlaylistLoader.media_; // Check if the current media playlist matches the current CDN\n\n if (currentPlaylist && currentPlaylist.attributes.serviceLocation !== currentPathway) {\n didEnablePlaylists = true;\n }\n }\n });\n }\n if (didEnablePlaylists) {\n this.changeSegmentPathway_();\n }\n }\n /**\n * Add, update, or delete playlists and media groups for\n * the pathway clones for HLS Content Steering.\n *\n * See https://datatracker.ietf.org/doc/draft-pantos-hls-rfc8216bis/\n *\n * NOTE: Pathway cloning does not currently support the `PER_VARIANT_URIS` and\n * `PER_RENDITION_URIS` as we do not handle `STABLE-VARIANT-ID` or\n * `STABLE-RENDITION-ID` values.\n */\n\n handlePathwayClones_() {\n const main = this.main();\n const playlists = main.playlists;\n const currentPathwayClones = this.contentSteeringController_.currentPathwayClones;\n const nextPathwayClones = this.contentSteeringController_.nextPathwayClones;\n const hasClones = currentPathwayClones && currentPathwayClones.size || nextPathwayClones && nextPathwayClones.size;\n if (!hasClones) {\n return;\n }\n for (const [id, clone] of currentPathwayClones.entries()) {\n const newClone = nextPathwayClones.get(id); // Delete the old pathway clone.\n\n if (!newClone) {\n this.mainPlaylistLoader_.updateOrDeleteClone(clone);\n this.contentSteeringController_.excludePathway(id);\n }\n }\n for (const [id, clone] of nextPathwayClones.entries()) {\n const oldClone = currentPathwayClones.get(id); // Create a new pathway if it is a new pathway clone object.\n\n if (!oldClone) {\n const playlistsToClone = playlists.filter(p => {\n return p.attributes['PATHWAY-ID'] === clone['BASE-ID'];\n });\n playlistsToClone.forEach(p => {\n this.mainPlaylistLoader_.addClonePathway(clone, p);\n });\n this.contentSteeringController_.addAvailablePathway(id);\n continue;\n } // There have not been changes to the pathway clone object, so skip.\n\n if (this.equalPathwayClones_(oldClone, clone)) {\n continue;\n } // Update a preexisting cloned pathway.\n // True is set for the update flag.\n\n this.mainPlaylistLoader_.updateOrDeleteClone(clone, true);\n 
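// re-register the updated clone's pathway ID so the steering controller can select it again\n 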
this.contentSteeringController_.addAvailablePathway(id);\n } // Deep copy contents of next to current pathways.\n\n this.contentSteeringController_.currentPathwayClones = new Map(JSON.parse(JSON.stringify([...nextPathwayClones])));\n }\n /**\n * Determines whether two pathway clone objects are equivalent.\n *\n * @param {Object} a The first pathway clone object.\n * @param {Object} b The second pathway clone object.\n * @return {boolean} True if the pathway clone objects are equal, false otherwise.\n */\n\n equalPathwayClones_(a, b) {\n if (a['BASE-ID'] !== b['BASE-ID'] || a.ID !== b.ID || a['URI-REPLACEMENT'].HOST !== b['URI-REPLACEMENT'].HOST) {\n return false;\n }\n const aParams = a['URI-REPLACEMENT'].PARAMS;\n const bParams = b['URI-REPLACEMENT'].PARAMS; // We need to iterate through both lists of params because one could be\n // missing a parameter that the other has.\n\n for (const p in aParams) {\n if (aParams[p] !== bParams[p]) {\n return false;\n }\n }\n for (const p in bParams) {\n if (aParams[p] !== bParams[p]) {\n return false;\n }\n }\n return true;\n }\n /**\n * Changes the current playlists for audio, video and subtitles after a new pathway\n * is chosen from content steering.\n */\n\n changeSegmentPathway_() {\n const nextPlaylist = this.selectPlaylist();\n this.pauseLoading(); // Switch audio and text track playlists if necessary in DASH\n\n if (this.contentSteeringController_.manifestType_ === 'DASH') {\n this.switchMediaForDASHContentSteering_();\n }\n this.switchMedia_(nextPlaylist, 'content-steering');\n }\n /**\n * Iterates through playlists, checks their keyId sets against the\n * keyStatusMap, and only enables playlists that have a usable key. If the playlist\n * has no keyId, leave it enabled by default.\n */\n\n excludeNonUsablePlaylistsByKeyId_() {\n if (!this.mainPlaylistLoader_ || !this.mainPlaylistLoader_.main) {\n return;\n }\n let nonUsableKeyStatusCount = 0;\n const NON_USABLE = 'non-usable';\n this.mainPlaylistLoader_.main.playlists.forEach(playlist => {\n const keyIdSet = this.mainPlaylistLoader_.getKeyIdSet(playlist); // If the playlist doesn't have keyIDs, let's not exclude it.\n\n if (!keyIdSet || !keyIdSet.size) {\n return;\n }\n keyIdSet.forEach(key => {\n const USABLE = 'usable';\n const hasUsableKeyStatus = this.keyStatusMap_.has(key) && this.keyStatusMap_.get(key) === USABLE;\n const nonUsableExclusion = playlist.lastExcludeReason_ === NON_USABLE && playlist.excludeUntil === Infinity;\n if (!hasUsableKeyStatus) {\n // Only exclude playlists that haven't already been excluded as non-usable.\n if (playlist.excludeUntil !== Infinity && playlist.lastExcludeReason_ !== NON_USABLE) {\n playlist.excludeUntil = Infinity;\n playlist.lastExcludeReason_ = NON_USABLE;\n this.logger_(`excluding playlist ${playlist.id} because the key ID ${key} doesn't exist in the keyStatusMap or is not ${USABLE}`);\n } // count all nonUsableKeyStatus\n\n nonUsableKeyStatusCount++;\n } else if (hasUsableKeyStatus && nonUsableExclusion) {\n delete playlist.excludeUntil;\n delete playlist.lastExcludeReason_;\n this.logger_(`enabling playlist ${playlist.id} because key ID ${key} is ${USABLE}`);\n }\n });\n }); // If, for whatever reason, every playlist has a non-usable key status, let's try re-including the SD renditions as a failsafe.\n\n if (nonUsableKeyStatusCount >= this.mainPlaylistLoader_.main.playlists.length) {\n this.mainPlaylistLoader_.main.playlists.forEach(playlist => {\n const isNonHD = playlist && playlist.attributes && playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.height < 720;\n const excludedForNonUsableKey = playlist.excludeUntil === Infinity && playlist.lastExcludeReason_ === NON_USABLE;\n if (isNonHD && excludedForNonUsableKey) {\n // Only delete the excludeUntil so we don't try and re-exclude these playlists.\n delete playlist.excludeUntil;\n videojs.log.warn(`enabling non-HD playlist ${playlist.id} because all playlists were excluded due to ${NON_USABLE} key IDs`);\n }\n });\n }\n }\n /**\n * Adds a key status to the key status map; converts the key ID to a string if necessary.\n *\n * @param {any} keyId the keyId to add a status for\n * @param {string} status the status of the keyId\n */\n\n addKeyStatus_(keyId, status) {\n const isString = typeof keyId === 'string';\n const keyIdHexString = isString ? keyId : bufferToHexString(keyId);\n const formattedKeyIdString = keyIdHexString.slice(0, 32).toLowerCase();\n this.logger_(`KeyStatus '${status}' with key ID ${formattedKeyIdString} added to the keyStatusMap`);\n this.keyStatusMap_.set(formattedKeyIdString, status);\n }\n /**\n * Utility function for adding key status to the keyStatusMap and filtering usable encrypted playlists.\n *\n * @param {any} keyId the keyId from the keystatuschange event\n * @param {string} status the key status string\n */\n\n updatePlaylistByKeyStatus(keyId, status) {\n this.addKeyStatus_(keyId, status);\n if (!this.waitingForFastQualityPlaylistReceived_) {\n this.excludeNonUsableThenChangePlaylist_();\n } // Listen to loadedplaylist with a single listener and check for new contentProtection elements when a playlist is updated.\n // Cache the bound handler: off() only removes a listener when given the same\n // function reference, and a freshly bound function would never match.\n\n if (!this.boundExcludeNonUsableThenChangePlaylist_) {\n this.boundExcludeNonUsableThenChangePlaylist_ = this.excludeNonUsableThenChangePlaylist_.bind(this);\n }\n this.mainPlaylistLoader_.off('loadedplaylist', this.boundExcludeNonUsableThenChangePlaylist_);\n this.mainPlaylistLoader_.on('loadedplaylist', this.boundExcludeNonUsableThenChangePlaylist_);\n }\n excludeNonUsableThenChangePlaylist_() {\n this.excludeNonUsablePlaylistsByKeyId_();\n this.fastQualityChange_();\n }\n}\n\n/**\n * Returns a function that acts as the enable/disable playlist function.\n *\n * @param {PlaylistLoader} loader - The main playlist loader\n * @param {string} playlistID - id of the playlist\n * @param {Function} changePlaylistFn - A function to be called after a\n * playlist's enabled-state has been changed. 
Will NOT be called if a\n * playlist's enabled-state is unchanged\n * @param {boolean=} enable - Value to set the playlist enabled-state to\n * or if undefined returns the current enabled-state for the playlist\n * @return {Function} Function for setting/getting enabled\n */\n\nconst enableFunction = (loader, playlistID, changePlaylistFn) => enable => {\n const playlist = loader.main.playlists[playlistID];\n const incompatible = isIncompatible(playlist);\n const currentlyEnabled = isEnabled(playlist);\n if (typeof enable === 'undefined') {\n return currentlyEnabled;\n }\n if (enable) {\n delete playlist.disabled;\n } else {\n playlist.disabled = true;\n }\n const metadata = {\n renditionInfo: {\n id: playlistID,\n bandwidth: playlist.attributes.BANDWIDTH,\n resolution: playlist.attributes.RESOLUTION,\n codecs: playlist.attributes.CODECS\n },\n cause: 'fast-quality'\n };\n if (enable !== currentlyEnabled && !incompatible) {\n // Ensure the outside world knows about our changes\n changePlaylistFn(playlist);\n if (enable) {\n loader.trigger({\n type: 'renditionenabled',\n metadata\n });\n } else {\n loader.trigger({\n type: 'renditiondisabled',\n metadata\n });\n }\n }\n return enable;\n};\n/**\n * The representation object encapsulates the publicly visible information\n * in a media playlist along with a setter/getter-type function (enabled)\n * for changing the enabled-state of a particular playlist entry\n *\n * @class Representation\n */\n\nclass Representation {\n constructor(vhsHandler, playlist, id) {\n const {\n playlistController_: pc\n } = vhsHandler;\n const qualityChangeFunction = pc.fastQualityChange_.bind(pc); // some playlist attributes are optional\n\n if (playlist.attributes) {\n const resolution = playlist.attributes.RESOLUTION;\n this.width = resolution && resolution.width;\n this.height = resolution && resolution.height;\n this.bandwidth = playlist.attributes.BANDWIDTH;\n this.frameRate = playlist.attributes['FRAME-RATE'];\n }\n this.codecs = codecsForPlaylist(pc.main(), playlist);\n this.playlist = playlist; // The id is simply the ordinality of the media playlist\n // within the main playlist\n\n this.id = id; // Partially-apply the enableFunction to create a playlist-\n // specific variant\n\n this.enabled = enableFunction(vhsHandler.playlists, playlist.id, qualityChangeFunction);\n }\n}\n/**\n * A mixin function that adds the `representations` api to an instance\n * of the VhsHandler class\n *\n * @param {VhsHandler} vhsHandler - An instance of VhsHandler to add the\n * representation API into\n */\n\nconst renditionSelectionMixin = function (vhsHandler) {\n // Add a single API-specific function to the VhsHandler instance\n vhsHandler.representations = () => {\n const main = vhsHandler.playlistController_.main();\n const playlists = isAudioOnly(main) ? vhsHandler.playlistController_.getAudioTrackPlaylists_() : main.playlists;\n if (!playlists) {\n return [];\n }\n return playlists.filter(media => !isIncompatible(media)).map((e, i) => new Representation(vhsHandler, e, e.id));\n };\n};\n\n/**\n * @file playback-watcher.js\n *\n * Playback starts, and now my watch begins. It shall not end until my death. I shall\n * take no wait, hold no uncleared timeouts, father no bad seeks. I shall wear no crowns\n * and win no glory. I shall live and die at my post. I am the corrector of the underflow.\n * I am the watcher of gaps. I am the shield that guards the realms of seekable. 
I pledge\n * my life and honor to the Playback Watch, for this Player and all the Players to come.\n */\n\nconst timerCancelEvents = ['seeking', 'seeked', 'pause', 'playing', 'error'];\n/**\n * @class PlaybackWatcher\n */\n\nclass PlaybackWatcher extends videojs.EventTarget {\n /**\n * Represents an PlaybackWatcher object.\n *\n * @class\n * @param {Object} options an object that includes the tech and settings\n */\n constructor(options) {\n super();\n this.playlistController_ = options.playlistController;\n this.tech_ = options.tech;\n this.seekable = options.seekable;\n this.allowSeeksWithinUnsafeLiveWindow = options.allowSeeksWithinUnsafeLiveWindow;\n this.liveRangeSafeTimeDelta = options.liveRangeSafeTimeDelta;\n this.media = options.media;\n this.playedRanges_ = [];\n this.consecutiveUpdates = 0;\n this.lastRecordedTime = null;\n this.checkCurrentTimeTimeout_ = null;\n this.logger_ = logger('PlaybackWatcher');\n this.logger_('initialize');\n const playHandler = () => this.monitorCurrentTime_();\n const canPlayHandler = () => this.monitorCurrentTime_();\n const waitingHandler = () => this.techWaiting_();\n const cancelTimerHandler = () => this.resetTimeUpdate_();\n const pc = this.playlistController_;\n const loaderTypes = ['main', 'subtitle', 'audio'];\n const loaderChecks = {};\n loaderTypes.forEach(type => {\n loaderChecks[type] = {\n reset: () => this.resetSegmentDownloads_(type),\n updateend: () => this.checkSegmentDownloads_(type)\n };\n pc[`${type}SegmentLoader_`].on('appendsdone', loaderChecks[type].updateend); // If a rendition switch happens during a playback stall where the buffer\n // isn't changing we want to reset. We cannot assume that the new rendition\n // will also be stalled, until after new appends.\n\n pc[`${type}SegmentLoader_`].on('playlistupdate', loaderChecks[type].reset); // Playback stalls should not be detected right after seeking.\n // This prevents one segment playlists (single vtt or single segment content)\n // from being detected as stalling. As the buffer will not change in those cases, since\n // the buffer is the entire video duration.\n\n this.tech_.on(['seeked', 'seeking'], loaderChecks[type].reset);\n });\n /**\n * We check if a seek was into a gap through the following steps:\n * 1. We get a seeking event and we do not get a seeked event. This means that\n * a seek was attempted but not completed.\n * 2. We run `fixesBadSeeks_` on segment loader appends. This means that we already\n * removed everything from our buffer and appended a segment, and should be ready\n * to check for gaps.\n */\n\n const setSeekingHandlers = fn => {\n ['main', 'audio'].forEach(type => {\n pc[`${type}SegmentLoader_`][fn]('appended', this.seekingAppendCheck_);\n });\n };\n this.seekingAppendCheck_ = () => {\n if (this.fixesBadSeeks_()) {\n this.consecutiveUpdates = 0;\n this.lastRecordedTime = this.tech_.currentTime();\n setSeekingHandlers('off');\n }\n };\n this.clearSeekingAppendCheck_ = () => setSeekingHandlers('off');\n this.watchForBadSeeking_ = () => {\n this.clearSeekingAppendCheck_();\n setSeekingHandlers('on');\n };\n this.tech_.on('seeked', this.clearSeekingAppendCheck_);\n this.tech_.on('seeking', this.watchForBadSeeking_);\n this.tech_.on('waiting', waitingHandler);\n this.tech_.on(timerCancelEvents, cancelTimerHandler);\n this.tech_.on('canplay', canPlayHandler);\n /*\n An edge case exists that results in gaps not being skipped when they exist at the beginning of a stream. 
This case\n is surfaced in one of two ways:\n 1) The `waiting` event is fired before the player has buffered content, making it impossible\n to find or skip the gap. The `waiting` event is followed by a `play` event. On first play\n we can check if playback is stalled due to a gap, and skip the gap if necessary.\n 2) A source with a gap at the beginning of the stream is loaded programmatically while the player\n is in a playing state. To catch this case, it's important that our one-time play listener is set up\n even if the player is in a playing state\n */\n\n this.tech_.one('play', playHandler); // Define the dispose function to clean up our events\n\n this.dispose = () => {\n this.clearSeekingAppendCheck_();\n this.logger_('dispose');\n this.tech_.off('waiting', waitingHandler);\n this.tech_.off(timerCancelEvents, cancelTimerHandler);\n this.tech_.off('canplay', canPlayHandler);\n this.tech_.off('play', playHandler);\n this.tech_.off('seeking', this.watchForBadSeeking_);\n this.tech_.off('seeked', this.clearSeekingAppendCheck_);\n loaderTypes.forEach(type => {\n pc[`${type}SegmentLoader_`].off('appendsdone', loaderChecks[type].updateend);\n pc[`${type}SegmentLoader_`].off('playlistupdate', loaderChecks[type].reset);\n this.tech_.off(['seeked', 'seeking'], loaderChecks[type].reset);\n });\n if (this.checkCurrentTimeTimeout_) {\n window$1.clearTimeout(this.checkCurrentTimeTimeout_);\n }\n this.resetTimeUpdate_();\n };\n }\n /**\n * Periodically check current time to see if playback stopped\n *\n * @private\n */\n\n monitorCurrentTime_() {\n this.checkCurrentTime_();\n if (this.checkCurrentTimeTimeout_) {\n window$1.clearTimeout(this.checkCurrentTimeTimeout_);\n } // poll every 250ms: 42ms would match 24 fps; WebKit uses 250ms and Firefox uses 15ms\n\n this.checkCurrentTimeTimeout_ = window$1.setTimeout(this.monitorCurrentTime_.bind(this), 250);\n }\n /**\n * Reset stalled download stats for a specific type of loader\n *\n * @param {string} type\n * The segment loader type to check.\n *\n * @listens SegmentLoader#playlistupdate\n * @listens Tech#seeking\n * @listens Tech#seeked\n */\n\n resetSegmentDownloads_(type) {\n const loader = this.playlistController_[`${type}SegmentLoader_`];\n if (this[`${type}StalledDownloads_`] > 0) {\n this.logger_(`resetting possible stalled download count for ${type} loader`);\n }\n this[`${type}StalledDownloads_`] = 0;\n this[`${type}Buffered_`] = loader.buffered_();\n }\n /**\n * Checks on every segment `appendsdone` to see\n * if segment appends are making progress. If they are not,\n * and we are still downloading bytes, we exclude the playlist.\n *\n * @param {string} type\n * The segment loader type to check.\n *\n * @listens SegmentLoader#appendsdone\n */\n\n checkSegmentDownloads_(type) {\n const pc = this.playlistController_;\n const loader = pc[`${type}SegmentLoader_`];\n const buffered = loader.buffered_();\n const isBufferedDifferent = isRangeDifferent(this[`${type}Buffered_`], buffered);\n this[`${type}Buffered_`] = buffered; // if another watcher is going to fix the issue, or if\n // the buffered value for this loader changed,\n // appends are working\n\n if (isBufferedDifferent) {\n const metadata = {\n bufferedRanges: buffered\n };\n pc.trigger({\n type: 'bufferedrangeschanged',\n metadata\n });\n this.resetSegmentDownloads_(type);\n return;\n }\n this[`${type}StalledDownloads_`]++;\n this.logger_(`found #${this[`${type}StalledDownloads_`]} ${type} appends that did not increase buffer (possible stalled download)`, {\n playlistId: loader.playlist_ && loader.playlist_.id,\n buffered: timeRangesToArray(buffered)\n }); // after 10 possibly stalled appends with no reset, exclude\n\n if (this[`${type}StalledDownloads_`] < 10) {\n return;\n }\n this.logger_(`${type} loader stalled download exclusion`);\n this.resetSegmentDownloads_(type);\n this.tech_.trigger({\n type: 'usage',\n name: `vhs-${type}-download-exclusion`\n });\n if (type === 'subtitle') {\n return;\n } // TODO: should we exclude audio tracks rather than main tracks\n // when type is audio?\n\n pc.excludePlaylist({\n error: {\n message: `Excessive ${type} segment downloading detected.`\n },\n playlistExclusionDuration: Infinity\n });\n }\n /**\n * The purpose of this function is to emulate the \"waiting\" event on\n * browsers that do not emit it when they are waiting for more\n * data to continue playback\n *\n * @private\n */\n\n checkCurrentTime_() {\n if (this.tech_.paused() || this.tech_.seeking()) {\n return;\n }\n const currentTime = this.tech_.currentTime();\n const buffered = this.tech_.buffered();\n if (this.lastRecordedTime === currentTime && (!buffered.length || currentTime + SAFE_TIME_DELTA >= buffered.end(buffered.length - 1))) {\n // If current time is at the end of the final buffered region, then any playback\n // stall is most likely caused by buffering in a low bandwidth environment. The tech\n // should fire a `waiting` event in this scenario, but due to browser and tech\n // inconsistencies it may not. Calling `techWaiting_` here allows us to simulate\n // responding to a native `waiting` event when the tech fails to emit one.\n return this.techWaiting_();\n }\n if (this.consecutiveUpdates >= 5 && currentTime === this.lastRecordedTime) {\n this.consecutiveUpdates++;\n this.waiting_();\n } else if (currentTime === this.lastRecordedTime) {\n this.consecutiveUpdates++;\n } else {\n this.playedRanges_.push(createTimeRanges([this.lastRecordedTime, currentTime]));\n const metadata = {\n playedRanges: this.playedRanges_\n };\n this.playlistController_.trigger({\n type: 'playedrangeschanged',\n metadata\n });\n this.consecutiveUpdates = 0;\n this.lastRecordedTime = currentTime;\n }\n }\n /**\n * Resets the 'timeupdate' mechanism designed to detect that we are stalled\n *\n * @private\n */\n\n resetTimeUpdate_() {\n this.consecutiveUpdates = 0;\n }\n /**\n * Fixes situations where there's a bad seek\n *\n * @return {boolean} whether an action was taken to fix the seek\n * @private\n */\n\n fixesBadSeeks_() {\n const seeking = this.tech_.seeking();\n if (!seeking) {\n return false;\n } // TODO: It's possible that these seekable checks should be moved out of this function\n // and into a function that runs on seekablechange. It's also possible that we only need\n // afterSeekableWindow as the buffered check at the bottom is good enough to handle before\n // seekable range.\n\n const seekable = this.seekable();\n const currentTime = this.tech_.currentTime();\n const isAfterSeekableRange = this.afterSeekableWindow_(seekable, currentTime, this.media(), this.allowSeeksWithinUnsafeLiveWindow);\n let seekTo;\n if (isAfterSeekableRange) {\n const seekableEnd = seekable.end(seekable.length - 1); // sync to live point (if VOD, our seekable was updated and we're simply adjusting)\n\n seekTo = seekableEnd;\n }\n if (this.beforeSeekableWindow_(seekable, currentTime)) {\n const seekableStart = seekable.start(0); // sync to the beginning of the live window\n // provide a buffer of .1 seconds to handle rounding/imprecise numbers\n\n seekTo = seekableStart + (\n // if the playlist is too short and the seekable range is an exact time (can\n // happen in live with a 3 segment playlist), then don't use a time delta\n seekableStart === seekable.end(0) ? 0 : SAFE_TIME_DELTA);\n }\n if (typeof seekTo !== 'undefined') {\n this.logger_(`Trying to seek outside of seekable at time ${currentTime} with ` + `seekable range ${printableRange(seekable)}. Seeking to ` + `${seekTo}.`);\n this.tech_.setCurrentTime(seekTo);\n return true;\n }\n const sourceUpdater = this.playlistController_.sourceUpdater_;\n const buffered = this.tech_.buffered();\n const audioBuffered = sourceUpdater.audioBuffer ? sourceUpdater.audioBuffered() : null;\n const videoBuffered = sourceUpdater.videoBuffer ? sourceUpdater.videoBuffered() : null;\n const media = this.media(); // verify that at least two segment durations or one part duration have been\n // appended before checking for a gap.\n\n const minAppendedDuration = media.partTargetDuration ? 
media.partTargetDuration : (media.targetDuration - TIME_FUDGE_FACTOR) * 2; // verify that at least two segment durations have been\n // appended before checking for a gap.\n\n const bufferedToCheck = [audioBuffered, videoBuffered];\n for (let i = 0; i < bufferedToCheck.length; i++) {\n // skip null buffered\n if (!bufferedToCheck[i]) {\n continue;\n }\n const timeAhead = timeAheadOf(bufferedToCheck[i], currentTime); // if we are less than two video/audio segment durations or one part\n // duration behind we haven't appended enough to call this a bad seek.\n\n if (timeAhead < minAppendedDuration) {\n return false;\n }\n }\n const nextRange = findNextRange(buffered, currentTime); // we have appended enough content, but we don't have anything buffered\n // to seek over the gap\n\n if (nextRange.length === 0) {\n return false;\n }\n seekTo = nextRange.start(0) + SAFE_TIME_DELTA;\n this.logger_(`Buffered region starts (${nextRange.start(0)}) ` + ` just beyond seek point (${currentTime}). Seeking to ${seekTo}.`);\n this.tech_.setCurrentTime(seekTo);\n return true;\n }\n /**\n * Handler for situations when we determine the player is waiting.\n *\n * @private\n */\n\n waiting_() {\n if (this.techWaiting_()) {\n return;\n } // All tech waiting checks failed. Use last resort correction\n\n const currentTime = this.tech_.currentTime();\n const buffered = this.tech_.buffered();\n const currentRange = findRange(buffered, currentTime); // Sometimes the player can stall for unknown reasons within a contiguous buffered\n // region with no indication that anything is amiss (seen in Firefox). Seeking to\n // currentTime is usually enough to kickstart the player. This checks that the player\n // is currently within a buffered region before attempting a corrective seek.\n // Chrome does not appear to continue `timeupdate` events after a `waiting` event\n // until there is ~ 3 seconds of forward buffer available. PlaybackWatcher should also\n // make sure there is ~3 seconds of forward buffer before taking any corrective action\n // to avoid triggering an `unknownwaiting` event when the network is slow.\n\n if (currentRange.length && currentTime + 3 <= currentRange.end(0)) {\n this.resetTimeUpdate_();\n this.tech_.setCurrentTime(currentTime);\n this.logger_(`Stopped at ${currentTime} while inside a buffered region ` + `[${currentRange.start(0)} -> ${currentRange.end(0)}]. Attempting to resume ` + 'playback by seeking to the current time.'); // unknown waiting corrections may be useful for monitoring QoS\n\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-unknown-waiting'\n });\n return;\n }\n }\n /**\n * Handler for situations when the tech fires a `waiting` event\n *\n * @return {boolean}\n * True if an action (or none) was needed to correct the waiting. False if no\n * checks passed\n * @private\n */\n\n techWaiting_() {\n const seekable = this.seekable();\n const currentTime = this.tech_.currentTime();\n if (this.tech_.seeking()) {\n // Tech is seeking or already waiting on another action, no action needed\n return true;\n }\n if (this.beforeSeekableWindow_(seekable, currentTime)) {\n const livePoint = seekable.end(seekable.length - 1);\n this.logger_(`Fell out of live window at time ${currentTime}. 
Seeking to ` + `live point (seekable end) ${livePoint}`);\n this.resetTimeUpdate_();\n this.tech_.setCurrentTime(livePoint); // live window resyncs may be useful for monitoring QoS\n\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-live-resync'\n });\n return true;\n }\n const sourceUpdater = this.tech_.vhs.playlistController_.sourceUpdater_;\n const buffered = this.tech_.buffered();\n const videoUnderflow = this.videoUnderflow_({\n audioBuffered: sourceUpdater.audioBuffered(),\n videoBuffered: sourceUpdater.videoBuffered(),\n currentTime\n });\n if (videoUnderflow) {\n // Even though the video underflowed and was stuck in a gap, the audio overplayed\n // the gap, leading currentTime into a buffered range. Seeking to currentTime\n // allows the video to catch up to the audio position without losing any audio\n // (only suffering ~3 seconds of frozen video and a pause in audio playback).\n this.resetTimeUpdate_();\n this.tech_.setCurrentTime(currentTime); // video underflow may be useful for monitoring QoS\n\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-video-underflow'\n });\n return true;\n }\n const nextRange = findNextRange(buffered, currentTime); // check for gap\n\n if (nextRange.length > 0) {\n this.logger_(`Stopped at ${currentTime} and seeking to ${nextRange.start(0)}`);\n this.resetTimeUpdate_();\n this.skipTheGap_(currentTime);\n return true;\n } // All checks failed. Returning false to indicate failure to correct waiting\n\n return false;\n }\n afterSeekableWindow_(seekable, currentTime, playlist, allowSeeksWithinUnsafeLiveWindow = false) {\n if (!seekable.length) {\n // we can't make a solid case if there's no seekable, default to false\n return false;\n }\n let allowedEnd = seekable.end(seekable.length - 1) + SAFE_TIME_DELTA;\n const isLive = !playlist.endList;\n const isLLHLS = typeof playlist.partTargetDuration === 'number';\n if (isLive && (isLLHLS || allowSeeksWithinUnsafeLiveWindow)) {\n allowedEnd = seekable.end(seekable.length - 1) + playlist.targetDuration * 3;\n }\n if (currentTime > allowedEnd) {\n return true;\n }\n return false;\n }\n beforeSeekableWindow_(seekable, currentTime) {\n if (seekable.length &&\n // can't fall before 0 and 0 seekable start identifies VOD stream\n seekable.start(0) > 0 && currentTime < seekable.start(0) - this.liveRangeSafeTimeDelta) {\n return true;\n }\n return false;\n }\n videoUnderflow_({\n videoBuffered,\n audioBuffered,\n currentTime\n }) {\n // audio only content will not have video underflow :)\n if (!videoBuffered) {\n return;\n }\n let gap; // find a gap in demuxed content.\n\n if (videoBuffered.length && audioBuffered.length) {\n // in Chrome audio will continue to play for ~3s when we run out of video\n // so we have to check that the video buffer did have some buffer in the\n // past.\n const lastVideoRange = findRange(videoBuffered, currentTime - 3);\n const videoRange = findRange(videoBuffered, currentTime);\n const audioRange = findRange(audioBuffered, currentTime);\n if (audioRange.length && !videoRange.length && lastVideoRange.length) {\n gap = {\n start: lastVideoRange.end(0),\n end: audioRange.end(0)\n };\n } // find a gap in muxed content.\n } else {\n const nextRange = findNextRange(videoBuffered, currentTime); // Even if there is no available next range, there is still a possibility we are\n // stuck in a gap due to video underflow.\n\n if (!nextRange.length) {\n gap = this.gapFromVideoUnderflow_(videoBuffered, currentTime);\n }\n }\n if (gap) {\n this.logger_(`Encountered a gap in video from 
${gap.start} to ${gap.end}. ` + `Seeking to current time ${currentTime}`);\n return true;\n }\n return false;\n }\n /**\n * Timer callback. If playback still has not proceeded, then we seek\n * to the start of the next buffered region.\n *\n * @private\n */\n\n skipTheGap_(scheduledCurrentTime) {\n const buffered = this.tech_.buffered();\n const currentTime = this.tech_.currentTime();\n const nextRange = findNextRange(buffered, currentTime);\n this.resetTimeUpdate_();\n if (nextRange.length === 0 || currentTime !== scheduledCurrentTime) {\n return;\n }\n this.logger_('skipTheGap_:', 'currentTime:', currentTime, 'scheduled currentTime:', scheduledCurrentTime, 'nextRange start:', nextRange.start(0)); // only seek if we still have not played\n\n this.tech_.setCurrentTime(nextRange.start(0) + TIME_FUDGE_FACTOR);\n const metadata = {\n gapInfo: {\n from: currentTime,\n to: nextRange.start(0)\n }\n };\n this.playlistController_.trigger({\n type: 'gapjumped',\n metadata\n });\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-gap-skip'\n });\n }\n gapFromVideoUnderflow_(buffered, currentTime) {\n // At least in Chrome, if there is a gap in the video buffer, the audio will continue\n // playing for ~3 seconds after the video gap starts. This is done to account for\n // video buffer underflow/underrun (note that this is not done when there is audio\n // buffer underflow/underrun -- in that case the video will stop as soon as it\n // encounters the gap, as audio stalls are more noticeable/jarring to a user than\n // video stalls). The player's time will reflect the playthrough of audio, so the\n // time will appear as if we are in a buffered region, even if we are stuck in a\n // \"gap.\"\n //\n // Example:\n // video buffer: 0 => 10.1, 10.2 => 20\n // audio buffer: 0 => 20\n // overall buffer: 0 => 10.1, 10.2 => 20\n // current time: 13\n //\n // Chrome's video froze at 10 seconds, where the video buffer encountered the gap,\n // however, the audio continued playing until it reached ~3 seconds past the gap\n // (13 seconds), at which point it stops as well. 
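/*
 * Illustrative sketch (not part of the bundled source) of the lookback test
 * gapFromVideoUnderflow_ performs below, applied to the worked example in
 * this comment. `gaps` is a plain array of [start, end] pairs here rather
 * than a TimeRanges object.
 */
const gapBehindCurrentTime = (gaps, currentTime) =>
  gaps.find(([start]) => currentTime - start < 4 && currentTime - start > 2) || null;

// A video buffer of 0 => 10.1, 10.2 => 20 has one internal gap; at
// currentTime 13 that gap started 2.9s back, inside the (2, 4)s window:
gapBehindCurrentTime([[10.1, 10.2]], 13); // => [10.1, 10.2]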
Since current time is past the\n // gap, findNextRange will return no ranges.\n //\n // To check for this issue, we see if there is a gap that starts somewhere within\n // a 3 second range (3 seconds +/- 1 second) back from our current time.\n const gaps = findGaps(buffered);\n for (let i = 0; i < gaps.length; i++) {\n const start = gaps.start(i);\n const end = gaps.end(i); // gap is starts no more than 4 seconds back\n\n if (currentTime - start < 4 && currentTime - start > 2) {\n return {\n start,\n end\n };\n }\n }\n return null;\n }\n}\nconst defaultOptions = {\n errorInterval: 30,\n getSource(next) {\n const tech = this.tech({\n IWillNotUseThisInPlugins: true\n });\n const sourceObj = tech.currentSource_ || this.currentSource();\n return next(sourceObj);\n }\n};\n/**\n * Main entry point for the plugin\n *\n * @param {Player} player a reference to a videojs Player instance\n * @param {Object} [options] an object with plugin options\n * @private\n */\n\nconst initPlugin = function (player, options) {\n let lastCalled = 0;\n let seekTo = 0;\n const localOptions = merge(defaultOptions, options);\n player.ready(() => {\n player.trigger({\n type: 'usage',\n name: 'vhs-error-reload-initialized'\n });\n });\n /**\n * Player modifications to perform that must wait until `loadedmetadata`\n * has been triggered\n *\n * @private\n */\n\n const loadedMetadataHandler = function () {\n if (seekTo) {\n player.currentTime(seekTo);\n }\n };\n /**\n * Set the source on the player element, play, and seek if necessary\n *\n * @param {Object} sourceObj An object specifying the source url and mime-type to play\n * @private\n */\n\n const setSource = function (sourceObj) {\n if (sourceObj === null || sourceObj === undefined) {\n return;\n }\n seekTo = player.duration() !== Infinity && player.currentTime() || 0;\n player.one('loadedmetadata', loadedMetadataHandler);\n player.src(sourceObj);\n player.trigger({\n type: 'usage',\n name: 'vhs-error-reload'\n });\n player.play();\n };\n /**\n * Attempt to get a source from either the built-in getSource function\n * or a custom function provided via the options\n *\n * @private\n */\n\n const errorHandler = function () {\n // Do not attempt to reload the source if a source-reload occurred before\n // 'errorInterval' time has elapsed since the last source-reload\n if (Date.now() - lastCalled < localOptions.errorInterval * 1000) {\n player.trigger({\n type: 'usage',\n name: 'vhs-error-reload-canceled'\n });\n return;\n }\n if (!localOptions.getSource || typeof localOptions.getSource !== 'function') {\n videojs.log.error('ERROR: reloadSourceOnError - The option getSource must be a function!');\n return;\n }\n lastCalled = Date.now();\n return localOptions.getSource.call(player, setSource);\n };\n /**\n * Unbind any event handlers that were bound by the plugin\n *\n * @private\n */\n\n const cleanupEvents = function () {\n player.off('loadedmetadata', loadedMetadataHandler);\n player.off('error', errorHandler);\n player.off('dispose', cleanupEvents);\n };\n /**\n * Cleanup before re-initializing the plugin\n *\n * @param {Object} [newOptions] an object with plugin options\n * @private\n */\n\n const reinitPlugin = function (newOptions) {\n cleanupEvents();\n initPlugin(player, newOptions);\n };\n player.on('error', errorHandler);\n player.on('dispose', cleanupEvents); // Overwrite the plugin function so that we can correctly cleanup before\n // initializing the plugin\n\n player.reloadSourceOnError = reinitPlugin;\n};\n/**\n * Reload the source when an error is 
detected as long as there\n * wasn't an error previously within the last 30 seconds\n *\n * @param {Object} [options] an object with plugin options\n */\n\nconst reloadSourceOnError = function (options) {\n initPlugin(this, options);\n};\nvar version$4 = \"3.13.2\";\nvar version$3 = \"7.0.3\";\nvar version$2 = \"1.3.0\";\nvar version$1 = \"7.1.0\";\nvar version = \"4.0.1\";\nconst Vhs = {\n PlaylistLoader,\n Playlist,\n utils,\n STANDARD_PLAYLIST_SELECTOR: lastBandwidthSelector,\n INITIAL_PLAYLIST_SELECTOR: lowestBitrateCompatibleVariantSelector,\n lastBandwidthSelector,\n movingAverageBandwidthSelector,\n comparePlaylistBandwidth,\n comparePlaylistResolution,\n xhr: xhrFactory()\n}; // Define getter/setters for config properties\n\nObject.keys(Config).forEach(prop => {\n Object.defineProperty(Vhs, prop, {\n get() {\n videojs.log.warn(`using Vhs.${prop} is UNSAFE be sure you know what you are doing`);\n return Config[prop];\n },\n set(value) {\n videojs.log.warn(`using Vhs.${prop} is UNSAFE be sure you know what you are doing`);\n if (typeof value !== 'number' || value < 0) {\n videojs.log.warn(`value of Vhs.${prop} must be greater than or equal to 0`);\n return;\n }\n Config[prop] = value;\n }\n });\n});\nconst LOCAL_STORAGE_KEY = 'videojs-vhs';\n/**\n * Updates the selectedIndex of the QualityLevelList when a mediachange happens in vhs.\n *\n * @param {QualityLevelList} qualityLevels The QualityLevelList to update.\n * @param {PlaylistLoader} playlistLoader PlaylistLoader containing the new media info.\n * @function handleVhsMediaChange\n */\n\nconst handleVhsMediaChange = function (qualityLevels, playlistLoader) {\n const newPlaylist = playlistLoader.media();\n let selectedIndex = -1;\n for (let i = 0; i < qualityLevels.length; i++) {\n if (qualityLevels[i].id === newPlaylist.id) {\n selectedIndex = i;\n break;\n }\n }\n qualityLevels.selectedIndex_ = selectedIndex;\n qualityLevels.trigger({\n selectedIndex,\n type: 'change'\n });\n};\n/**\n * Adds quality levels to list once playlist metadata is available\n *\n * @param {QualityLevelList} qualityLevels The QualityLevelList to attach events to.\n * @param {Object} vhs Vhs object to listen to for media events.\n * @function handleVhsLoadedMetadata\n */\n\nconst handleVhsLoadedMetadata = function (qualityLevels, vhs) {\n vhs.representations().forEach(rep => {\n qualityLevels.addQualityLevel(rep);\n });\n handleVhsMediaChange(qualityLevels, vhs.playlists);\n}; // VHS is a source handler, not a tech. Make sure attempts to use it\n// as one do not cause exceptions.\n\nVhs.canPlaySource = function () {\n return videojs.log.warn('VHS is no longer a tech. 
Please remove it from ' + 'your player\\'s techOrder.');\n};\nconst emeKeySystems = (keySystemOptions, mainPlaylist, audioPlaylist) => {\n if (!keySystemOptions) {\n return keySystemOptions;\n }\n let codecs = {};\n if (mainPlaylist && mainPlaylist.attributes && mainPlaylist.attributes.CODECS) {\n codecs = unwrapCodecList(parseCodecs(mainPlaylist.attributes.CODECS));\n }\n if (audioPlaylist && audioPlaylist.attributes && audioPlaylist.attributes.CODECS) {\n codecs.audio = audioPlaylist.attributes.CODECS;\n }\n const videoContentType = getMimeForCodec(codecs.video);\n const audioContentType = getMimeForCodec(codecs.audio); // upsert the content types based on the selected playlist\n\n const keySystemContentTypes = {};\n for (const keySystem in keySystemOptions) {\n keySystemContentTypes[keySystem] = {};\n if (audioContentType) {\n keySystemContentTypes[keySystem].audioContentType = audioContentType;\n }\n if (videoContentType) {\n keySystemContentTypes[keySystem].videoContentType = videoContentType;\n } // Default to using the video playlist's PSSH even though they may be different, as\n // videojs-contrib-eme will only accept one in the options.\n //\n // This shouldn't be an issue for most cases as early intialization will handle all\n // unique PSSH values, and if they aren't, then encrypted events should have the\n // specific information needed for the unique license.\n\n if (mainPlaylist.contentProtection && mainPlaylist.contentProtection[keySystem] && mainPlaylist.contentProtection[keySystem].pssh) {\n keySystemContentTypes[keySystem].pssh = mainPlaylist.contentProtection[keySystem].pssh;\n } // videojs-contrib-eme accepts the option of specifying: 'com.some.cdm': 'url'\n // so we need to prevent overwriting the URL entirely\n\n if (typeof keySystemOptions[keySystem] === 'string') {\n keySystemContentTypes[keySystem].url = keySystemOptions[keySystem];\n }\n }\n return merge(keySystemOptions, keySystemContentTypes);\n};\n/**\n * @typedef {Object} KeySystems\n *\n * keySystems configuration for https://github.com/videojs/videojs-contrib-eme\n * Note: not all options are listed here.\n *\n * @property {Uint8Array} [pssh]\n * Protection System Specific Header\n */\n\n/**\n * Goes through all the playlists and collects an array of KeySystems options objects\n * containing each playlist's keySystems and their pssh values, if available.\n *\n * @param {Object[]} playlists\n * The playlists to look through\n * @param {string[]} keySystems\n * The keySystems to collect pssh values for\n *\n * @return {KeySystems[]}\n * An array of KeySystems objects containing available key systems and their\n * pssh values\n */\n\nconst getAllPsshKeySystemsOptions = (playlists, keySystems) => {\n return playlists.reduce((keySystemsArr, playlist) => {\n if (!playlist.contentProtection) {\n return keySystemsArr;\n }\n const keySystemsOptions = keySystems.reduce((keySystemsObj, keySystem) => {\n const keySystemOptions = playlist.contentProtection[keySystem];\n if (keySystemOptions && keySystemOptions.pssh) {\n keySystemsObj[keySystem] = {\n pssh: keySystemOptions.pssh\n };\n }\n return keySystemsObj;\n }, {});\n if (Object.keys(keySystemsOptions).length) {\n keySystemsArr.push(keySystemsOptions);\n }\n return keySystemsArr;\n }, []);\n};\n/**\n * Returns a promise that waits for the\n * [eme plugin](https://github.com/videojs/videojs-contrib-eme) to create a key session.\n *\n * Works around https://bugs.chromium.org/p/chromium/issues/detail?id=895449 in non-IE11\n * browsers.\n *\n * As per the above ticket, 
this is particularly important for Chrome, where, if\n * unencrypted content is appended before encrypted content and the key session has not\n * been created, a MEDIA_ERR_DECODE will be thrown once the encrypted content is reached\n * during playback.\n *\n * @param {Object} player\n * The player instance\n * @param {Object[]} sourceKeySystems\n * The key systems options from the player source\n * @param {Object} [audioMedia]\n * The active audio media playlist (optional)\n * @param {Object[]} mainPlaylists\n * The playlists found on the main playlist object\n *\n * @return {Object}\n * Promise that resolves when the key session has been created\n */\n\nconst waitForKeySessionCreation = ({\n player,\n sourceKeySystems,\n audioMedia,\n mainPlaylists\n}) => {\n if (!player.eme.initializeMediaKeys) {\n return Promise.resolve();\n } // TODO should all audio PSSH values be initialized for DRM?\n //\n // All unique video rendition pssh values are initialized for DRM, but here only\n // the initial audio playlist license is initialized. In theory, an encrypted\n // event should be fired if the user switches to an alternative audio playlist\n // where a license is required, but this case hasn't yet been tested. In addition, there\n // may be many alternate audio playlists unlikely to be used (e.g., multiple different\n // languages).\n\n const playlists = audioMedia ? mainPlaylists.concat([audioMedia]) : mainPlaylists;\n const keySystemsOptionsArr = getAllPsshKeySystemsOptions(playlists, Object.keys(sourceKeySystems));\n const initializationFinishedPromises = [];\n const keySessionCreatedPromises = []; // Since PSSH values are interpreted as initData, EME will dedupe any duplicates. The\n // only place where it should not be deduped is for ms-prefixed APIs, but\n // the existence of modern EME APIs in addition to\n // ms-prefixed APIs on Edge should prevent this from being a concern.\n // initializeMediaKeys also won't use the webkit-prefixed APIs.\n\n keySystemsOptionsArr.forEach(keySystemsOptions => {\n keySessionCreatedPromises.push(new Promise((resolve, reject) => {\n player.tech_.one('keysessioncreated', resolve);\n }));\n initializationFinishedPromises.push(new Promise((resolve, reject) => {\n player.eme.initializeMediaKeys({\n keySystems: keySystemsOptions\n }, err => {\n if (err) {\n reject(err);\n return;\n }\n resolve();\n });\n }));\n }); // The reasons Promise.race is chosen over Promise.any:\n //\n // * Promise.any is only available in Safari 14+.\n // * None of these promises are expected to reject. 
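/*
 * Illustrative sketch (not part of the bundled source) of the race-of-races
 * shape waitForKeySessionCreation returns below: `initPromises` resolve as
 * each initializeMediaKeys callback fires, `sessionPromises` resolve on the
 * first 'keysessioncreated' event. Both names are hypothetical stand-ins.
 */
const waitForFirstSessionOrFullInit = (initPromises, sessionPromises) =>
  Promise.race([
    // resolves quickly when sessions already exist from a previous source
    Promise.all(initPromises),
    // otherwise resolves as soon as any single session is created
    Promise.race(sessionPromises)
  ]);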
If they do reject, it might be\n // better here for the race to surface the rejection, rather than mask it by using\n // Promise.any.\n\n return Promise.race([\n // If a session was previously created, these will all finish resolving without\n // creating a new session, otherwise it will take until the end of all license\n // requests, which is why the key session check is used (to make setup much faster).\n Promise.all(initializationFinishedPromises),\n // Once a single session is created, the browser knows DRM will be used.\n Promise.race(keySessionCreatedPromises)]);\n};\n/**\n * If the [eme](https://github.com/videojs/videojs-contrib-eme) plugin is available, and\n * there are keySystems on the source, sets up source options to prepare the source for\n * eme.\n *\n * @param {Object} player\n * The player instance\n * @param {Object[]} sourceKeySystems\n * The key systems options from the player source\n * @param {Object} media\n * The active media playlist\n * @param {Object} [audioMedia]\n * The active audio media playlist (optional)\n *\n * @return {boolean}\n * Whether or not options were configured and EME is available\n */\n\nconst setupEmeOptions = ({\n player,\n sourceKeySystems,\n media,\n audioMedia\n}) => {\n const sourceOptions = emeKeySystems(sourceKeySystems, media, audioMedia);\n if (!sourceOptions) {\n return false;\n }\n player.currentSource().keySystems = sourceOptions; // eme handles the rest of the setup, so if it is missing\n // do nothing.\n\n if (sourceOptions && !player.eme) {\n videojs.log.warn('DRM encrypted source cannot be decrypted without a DRM plugin');\n return false;\n }\n return true;\n};\nconst getVhsLocalStorage = () => {\n if (!window$1.localStorage) {\n return null;\n }\n const storedObject = window$1.localStorage.getItem(LOCAL_STORAGE_KEY);\n if (!storedObject) {\n return null;\n }\n try {\n return JSON.parse(storedObject);\n } catch (e) {\n // someone may have tampered with the value\n return null;\n }\n};\nconst updateVhsLocalStorage = options => {\n if (!window$1.localStorage) {\n return false;\n }\n let objectToStore = getVhsLocalStorage();\n objectToStore = objectToStore ? merge(objectToStore, options) : options;\n try {\n window$1.localStorage.setItem(LOCAL_STORAGE_KEY, JSON.stringify(objectToStore));\n } catch (e) {\n // Throws if storage is full (e.g., always on iOS 5+ Safari private mode, where\n // storage is set to 0).\n // https://developer.mozilla.org/en-US/docs/Web/API/Storage/setItem#Exceptions\n // No need to perform any operation.\n return false;\n }\n return objectToStore;\n};\n/**\n * Parses VHS-supported media types from data URIs. 
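/*
 * Illustrative sketch (not part of the bundled source) of the read-merge-write
 * localStorage pattern getVhsLocalStorage/updateVhsLocalStorage implement
 * above, under a hypothetical key. Both the parse and the write sit in
 * try/catch because stored values can be tampered with and quota-limited
 * storage (e.g. Safari private mode) throws on setItem.
 */
const STORAGE_KEY = 'example-player-settings'; // hypothetical key
const readSettings = () => {
  try {
    return JSON.parse(window.localStorage.getItem(STORAGE_KEY)) || null;
  } catch (e) {
    return null; // missing or tampered value
  }
};
const writeSettings = updates => {
  const merged = Object.assign({}, readSettings(), updates);
  try {
    window.localStorage.setItem(STORAGE_KEY, JSON.stringify(merged));
    return merged;
  } catch (e) {
    return false; // storage full or unavailable
  }
};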
See\n * https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs\n * for information on data URIs.\n *\n * @param {string} dataUri\n * The data URI\n *\n * @return {string|Object}\n * The parsed object/string, or the original string if no supported media type\n * was found\n */\n\nconst expandDataUri = dataUri => {\n if (dataUri.toLowerCase().indexOf('data:application/vnd.videojs.vhs+json,') === 0) {\n return JSON.parse(dataUri.substring(dataUri.indexOf(',') + 1));\n } // no known case for this data URI, return the string as-is\n\n return dataUri;\n};\n/**\n * Adds a request hook to an xhr object\n *\n * @param {Object} xhr object to add the onRequest hook to\n * @param {function} callback hook function for an xhr request\n */\n\nconst addOnRequestHook = (xhr, callback) => {\n if (!xhr._requestCallbackSet) {\n xhr._requestCallbackSet = new Set();\n }\n xhr._requestCallbackSet.add(callback);\n};\n/**\n * Adds a response hook to an xhr object\n *\n * @param {Object} xhr object to add the onResponse hook to\n * @param {function} callback hook function for an xhr response\n */\n\nconst addOnResponseHook = (xhr, callback) => {\n if (!xhr._responseCallbackSet) {\n xhr._responseCallbackSet = new Set();\n }\n xhr._responseCallbackSet.add(callback);\n};\n/**\n * Removes a request hook on an xhr object, deletes the onRequest set if empty.\n *\n * @param {Object} xhr object to remove the onRequest hook from\n * @param {function} callback hook function to remove\n */\n\nconst removeOnRequestHook = (xhr, callback) => {\n if (!xhr._requestCallbackSet) {\n return;\n }\n xhr._requestCallbackSet.delete(callback);\n if (!xhr._requestCallbackSet.size) {\n delete xhr._requestCallbackSet;\n }\n};\n/**\n * Removes a response hook on an xhr object, deletes the onResponse set if empty.\n *\n * @param {Object} xhr object to remove the onResponse hook from\n * @param {function} callback hook function to remove\n */\n\nconst removeOnResponseHook = (xhr, callback) => {\n if (!xhr._responseCallbackSet) {\n return;\n }\n xhr._responseCallbackSet.delete(callback);\n if (!xhr._responseCallbackSet.size) {\n delete xhr._responseCallbackSet;\n }\n};\n/**\n * Whether the browser has built-in HLS support.\n */\n\nVhs.supportsNativeHls = function () {\n if (!document$1 || !document$1.createElement) {\n return false;\n }\n const video = document$1.createElement('video'); // native HLS is definitely not supported if HTML5 video isn't\n\n if (!videojs.getTech('Html5').isSupported()) {\n return false;\n } // HLS manifests can go by many mime-types\n\n const canPlay = [\n // Apple santioned\n 'application/vnd.apple.mpegurl',\n // Apple sanctioned for backwards compatibility\n 'audio/mpegurl',\n // Very common\n 'audio/x-mpegurl',\n // Very common\n 'application/x-mpegurl',\n // Included for completeness\n 'video/x-mpegurl', 'video/mpegurl', 'application/mpegurl'];\n return canPlay.some(function (canItPlay) {\n return /maybe|probably/i.test(video.canPlayType(canItPlay));\n });\n}();\nVhs.supportsNativeDash = function () {\n if (!document$1 || !document$1.createElement || !videojs.getTech('Html5').isSupported()) {\n return false;\n }\n return /maybe|probably/i.test(document$1.createElement('video').canPlayType('application/dash+xml'));\n}();\nVhs.supportsTypeNatively = type => {\n if (type === 'hls') {\n return Vhs.supportsNativeHls;\n }\n if (type === 'dash') {\n return Vhs.supportsNativeDash;\n }\n return false;\n};\n/**\n * VHS is a source handler, not a tech. 
Make sure attempts to use it\n * as one do not cause exceptions.\n */\n\nVhs.isSupported = function () {\n return videojs.log.warn('VHS is no longer a tech. Please remove it from ' + 'your player\\'s techOrder.');\n};\n/**\n * A global function for setting an onRequest hook\n *\n * @param {function} callback for request modifiction\n */\n\nVhs.xhr.onRequest = function (callback) {\n addOnRequestHook(Vhs.xhr, callback);\n};\n/**\n * A global function for setting an onResponse hook\n *\n * @param {callback} callback for response data retrieval\n */\n\nVhs.xhr.onResponse = function (callback) {\n addOnResponseHook(Vhs.xhr, callback);\n};\n/**\n * Deletes a global onRequest callback if it exists\n *\n * @param {function} callback to delete from the global set\n */\n\nVhs.xhr.offRequest = function (callback) {\n removeOnRequestHook(Vhs.xhr, callback);\n};\n/**\n * Deletes a global onResponse callback if it exists\n *\n * @param {function} callback to delete from the global set\n */\n\nVhs.xhr.offResponse = function (callback) {\n removeOnResponseHook(Vhs.xhr, callback);\n};\nconst Component = videojs.getComponent('Component');\n/**\n * The Vhs Handler object, where we orchestrate all of the parts\n * of VHS to interact with video.js\n *\n * @class VhsHandler\n * @extends videojs.Component\n * @param {Object} source the soruce object\n * @param {Tech} tech the parent tech object\n * @param {Object} options optional and required options\n */\n\nclass VhsHandler extends Component {\n constructor(source, tech, options) {\n super(tech, options.vhs); // if a tech level `initialBandwidth` option was passed\n // use that over the VHS level `bandwidth` option\n\n if (typeof options.initialBandwidth === 'number') {\n this.options_.bandwidth = options.initialBandwidth;\n }\n this.logger_ = logger('VhsHandler'); // we need access to the player in some cases,\n // so, get it from Video.js via the `playerId`\n\n if (tech.options_ && tech.options_.playerId) {\n const _player = videojs.getPlayer(tech.options_.playerId);\n this.player_ = _player;\n }\n this.tech_ = tech;\n this.source_ = source;\n this.stats = {};\n this.ignoreNextSeekingEvent_ = false;\n this.setOptions_();\n if (this.options_.overrideNative && tech.overrideNativeAudioTracks && tech.overrideNativeVideoTracks) {\n tech.overrideNativeAudioTracks(true);\n tech.overrideNativeVideoTracks(true);\n } else if (this.options_.overrideNative && (tech.featuresNativeVideoTracks || tech.featuresNativeAudioTracks)) {\n // overriding native VHS only works if audio tracks have been emulated\n // error early if we're misconfigured\n throw new Error('Overriding native VHS requires emulated tracks. 
' + 'See https://git.io/vMpjB');\n } // listen for fullscreenchange events for this player so that we\n // can adjust our quality selection quickly\n\n this.on(document$1, ['fullscreenchange', 'webkitfullscreenchange', 'mozfullscreenchange', 'MSFullscreenChange'], event => {\n const fullscreenElement = document$1.fullscreenElement || document$1.webkitFullscreenElement || document$1.mozFullScreenElement || document$1.msFullscreenElement;\n if (fullscreenElement && fullscreenElement.contains(this.tech_.el())) {\n this.playlistController_.fastQualityChange_();\n } else {\n // When leaving fullscreen, since the in page pixel dimensions should be smaller\n // than full screen, see if there should be a rendition switch down to preserve\n // bandwidth.\n this.playlistController_.checkABR_();\n }\n });\n this.on(this.tech_, 'seeking', function () {\n if (this.ignoreNextSeekingEvent_) {\n this.ignoreNextSeekingEvent_ = false;\n return;\n }\n this.setCurrentTime(this.tech_.currentTime());\n });\n this.on(this.tech_, 'error', function () {\n // verify that the error was real and we are loaded\n // enough to have pc loaded.\n if (this.tech_.error() && this.playlistController_) {\n this.playlistController_.pauseLoading();\n }\n });\n this.on(this.tech_, 'play', this.play);\n }\n /**\n * Set VHS options based on options from configuration, as well as partial\n * options to be passed at a later time.\n *\n * @param {Object} options A partial chunk of config options\n */\n\n setOptions_(options = {}) {\n this.options_ = merge(this.options_, options); // defaults\n\n this.options_.withCredentials = this.options_.withCredentials || false;\n this.options_.limitRenditionByPlayerDimensions = this.options_.limitRenditionByPlayerDimensions === false ? false : true;\n this.options_.useDevicePixelRatio = this.options_.useDevicePixelRatio || false;\n this.options_.useBandwidthFromLocalStorage = typeof this.source_.useBandwidthFromLocalStorage !== 'undefined' ? this.source_.useBandwidthFromLocalStorage : this.options_.useBandwidthFromLocalStorage || false;\n this.options_.useForcedSubtitles = this.options_.useForcedSubtitles || false;\n this.options_.useNetworkInformationApi = this.options_.useNetworkInformationApi || false;\n this.options_.useDtsForTimestampOffset = this.options_.useDtsForTimestampOffset || false;\n this.options_.customTagParsers = this.options_.customTagParsers || [];\n this.options_.customTagMappers = this.options_.customTagMappers || [];\n this.options_.cacheEncryptionKeys = this.options_.cacheEncryptionKeys || false;\n this.options_.llhls = this.options_.llhls === false ? 
false : true;\n this.options_.bufferBasedABR = this.options_.bufferBasedABR || false;\n if (typeof this.options_.playlistExclusionDuration !== 'number') {\n this.options_.playlistExclusionDuration = 60;\n }\n if (typeof this.options_.bandwidth !== 'number') {\n if (this.options_.useBandwidthFromLocalStorage) {\n const storedObject = getVhsLocalStorage();\n if (storedObject && storedObject.bandwidth) {\n this.options_.bandwidth = storedObject.bandwidth;\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-bandwidth-from-local-storage'\n });\n }\n if (storedObject && storedObject.throughput) {\n this.options_.throughput = storedObject.throughput;\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-throughput-from-local-storage'\n });\n }\n }\n } // if bandwidth was not set by options or pulled from local storage, start playlist\n // selection at a reasonable bandwidth\n\n if (typeof this.options_.bandwidth !== 'number') {\n this.options_.bandwidth = Config.INITIAL_BANDWIDTH;\n } // If the bandwidth number is unchanged from the initial setting\n // then this takes precedence over the enableLowInitialPlaylist option\n\n this.options_.enableLowInitialPlaylist = this.options_.enableLowInitialPlaylist && this.options_.bandwidth === Config.INITIAL_BANDWIDTH; // grab options passed to player.src\n\n ['withCredentials', 'useDevicePixelRatio', 'customPixelRatio', 'limitRenditionByPlayerDimensions', 'bandwidth', 'customTagParsers', 'customTagMappers', 'cacheEncryptionKeys', 'playlistSelector', 'initialPlaylistSelector', 'bufferBasedABR', 'liveRangeSafeTimeDelta', 'llhls', 'useForcedSubtitles', 'useNetworkInformationApi', 'useDtsForTimestampOffset', 'exactManifestTimings', 'leastPixelDiffSelector'].forEach(option => {\n if (typeof this.source_[option] !== 'undefined') {\n this.options_[option] = this.source_[option];\n }\n });\n this.limitRenditionByPlayerDimensions = this.options_.limitRenditionByPlayerDimensions;\n this.useDevicePixelRatio = this.options_.useDevicePixelRatio;\n const customPixelRatio = this.options_.customPixelRatio; // Ensure the custom pixel ratio is a number greater than or equal to 0\n\n if (typeof customPixelRatio === 'number' && customPixelRatio >= 0) {\n this.customPixelRatio = customPixelRatio;\n }\n } // alias for public method to set options\n\n setOptions(options = {}) {\n this.setOptions_(options);\n }\n /**\n * called when player.src gets called, handle a new source\n *\n * @param {Object} src the source object to handle\n */\n\n src(src, type) {\n // do nothing if the src is falsey\n if (!src) {\n return;\n }\n this.setOptions_(); // add main playlist controller options\n\n this.options_.src = expandDataUri(this.source_.src);\n this.options_.tech = this.tech_;\n this.options_.externVhs = Vhs;\n this.options_.sourceType = simpleTypeFromSourceType(type); // Whenever we seek internally, we should update the tech\n\n this.options_.seekTo = time => {\n this.tech_.setCurrentTime(time);\n }; // pass player to allow for player level eventing on construction.\n\n this.options_.player_ = this.player_;\n this.playlistController_ = new PlaylistController(this.options_);\n const playbackWatcherOptions = merge({\n liveRangeSafeTimeDelta: SAFE_TIME_DELTA\n }, this.options_, {\n seekable: () => this.seekable(),\n media: () => this.playlistController_.media(),\n playlistController: this.playlistController_\n });\n this.playbackWatcher_ = new PlaybackWatcher(playbackWatcherOptions);\n this.attachStreamingEventListeners_();\n this.playlistController_.on('error', () => {\n const player = 
videojs.players[this.tech_.options_.playerId];\n let error = this.playlistController_.error;\n if (typeof error === 'object' && !error.code) {\n error.code = 3;\n } else if (typeof error === 'string') {\n error = {\n message: error,\n code: 3\n };\n }\n player.error(error);\n });\n const defaultSelector = this.options_.bufferBasedABR ? Vhs.movingAverageBandwidthSelector(0.55) : Vhs.STANDARD_PLAYLIST_SELECTOR; // `this` in selectPlaylist should be the VhsHandler for backwards\n // compatibility with < v2\n\n this.playlistController_.selectPlaylist = this.selectPlaylist ? this.selectPlaylist.bind(this) : defaultSelector.bind(this);\n this.playlistController_.selectInitialPlaylist = Vhs.INITIAL_PLAYLIST_SELECTOR.bind(this); // re-expose some internal objects for backwards compatibility with < v2\n\n this.playlists = this.playlistController_.mainPlaylistLoader_;\n this.mediaSource = this.playlistController_.mediaSource; // Proxy assignment of some properties to the main playlist\n // controller. Using a custom property for backwards compatibility\n // with < v2\n\n Object.defineProperties(this, {\n selectPlaylist: {\n get() {\n return this.playlistController_.selectPlaylist;\n },\n set(selectPlaylist) {\n this.playlistController_.selectPlaylist = selectPlaylist.bind(this);\n }\n },\n throughput: {\n get() {\n return this.playlistController_.mainSegmentLoader_.throughput.rate;\n },\n set(throughput) {\n this.playlistController_.mainSegmentLoader_.throughput.rate = throughput; // By setting `count` to 1 the throughput value becomes the starting value\n // for the cumulative average\n\n this.playlistController_.mainSegmentLoader_.throughput.count = 1;\n }\n },\n bandwidth: {\n get() {\n let playerBandwidthEst = this.playlistController_.mainSegmentLoader_.bandwidth;\n const networkInformation = window$1.navigator.connection || window$1.navigator.mozConnection || window$1.navigator.webkitConnection;\n const tenMbpsAsBitsPerSecond = 10e6;\n if (this.options_.useNetworkInformationApi && networkInformation) {\n // downlink returns Mbps\n // https://developer.mozilla.org/en-US/docs/Web/API/NetworkInformation/downlink\n const networkInfoBandwidthEstBitsPerSec = networkInformation.downlink * 1000 * 1000; // downlink maxes out at 10 Mbps. In the event that both networkInformationApi and the player\n // estimate a bandwidth greater than 10 Mbps, use the larger of the two estimates to ensure that\n // high quality streams are not filtered out.\n\n if (networkInfoBandwidthEstBitsPerSec >= tenMbpsAsBitsPerSecond && playerBandwidthEst >= tenMbpsAsBitsPerSecond) {\n playerBandwidthEst = Math.max(playerBandwidthEst, networkInfoBandwidthEstBitsPerSec);\n } else {\n playerBandwidthEst = networkInfoBandwidthEstBitsPerSec;\n }\n }\n return playerBandwidthEst;\n },\n set(bandwidth) {\n this.playlistController_.mainSegmentLoader_.bandwidth = bandwidth; // setting the bandwidth manually resets the throughput counter\n // `count` is set to zero that current value of `rate` isn't included\n // in the cumulative average\n\n this.playlistController_.mainSegmentLoader_.throughput = {\n rate: 0,\n count: 0\n };\n }\n },\n /**\n * `systemBandwidth` is a combination of two serial processes bit-rates. 
The first\n * is the network bitrate provided by `bandwidth` and the second is the bitrate of\n * the entire process after that - decryption, transmuxing, and appending - provided\n * by `throughput`.\n *\n * Since the two process are serial, the overall system bandwidth is given by:\n * sysBandwidth = 1 / (1 / bandwidth + 1 / throughput)\n */\n systemBandwidth: {\n get() {\n const invBandwidth = 1 / (this.bandwidth || 1);\n let invThroughput;\n if (this.throughput > 0) {\n invThroughput = 1 / this.throughput;\n } else {\n invThroughput = 0;\n }\n const systemBitrate = Math.floor(1 / (invBandwidth + invThroughput));\n return systemBitrate;\n },\n set() {\n videojs.log.error('The \"systemBandwidth\" property is read-only');\n }\n }\n });\n if (this.options_.bandwidth) {\n this.bandwidth = this.options_.bandwidth;\n }\n if (this.options_.throughput) {\n this.throughput = this.options_.throughput;\n }\n Object.defineProperties(this.stats, {\n bandwidth: {\n get: () => this.bandwidth || 0,\n enumerable: true\n },\n mediaRequests: {\n get: () => this.playlistController_.mediaRequests_() || 0,\n enumerable: true\n },\n mediaRequestsAborted: {\n get: () => this.playlistController_.mediaRequestsAborted_() || 0,\n enumerable: true\n },\n mediaRequestsTimedout: {\n get: () => this.playlistController_.mediaRequestsTimedout_() || 0,\n enumerable: true\n },\n mediaRequestsErrored: {\n get: () => this.playlistController_.mediaRequestsErrored_() || 0,\n enumerable: true\n },\n mediaTransferDuration: {\n get: () => this.playlistController_.mediaTransferDuration_() || 0,\n enumerable: true\n },\n mediaBytesTransferred: {\n get: () => this.playlistController_.mediaBytesTransferred_() || 0,\n enumerable: true\n },\n mediaSecondsLoaded: {\n get: () => this.playlistController_.mediaSecondsLoaded_() || 0,\n enumerable: true\n },\n mediaAppends: {\n get: () => this.playlistController_.mediaAppends_() || 0,\n enumerable: true\n },\n mainAppendsToLoadedData: {\n get: () => this.playlistController_.mainAppendsToLoadedData_() || 0,\n enumerable: true\n },\n audioAppendsToLoadedData: {\n get: () => this.playlistController_.audioAppendsToLoadedData_() || 0,\n enumerable: true\n },\n appendsToLoadedData: {\n get: () => this.playlistController_.appendsToLoadedData_() || 0,\n enumerable: true\n },\n timeToLoadedData: {\n get: () => this.playlistController_.timeToLoadedData_() || 0,\n enumerable: true\n },\n buffered: {\n get: () => timeRangesToArray(this.tech_.buffered()),\n enumerable: true\n },\n currentTime: {\n get: () => this.tech_.currentTime(),\n enumerable: true\n },\n currentSource: {\n get: () => this.tech_.currentSource_,\n enumerable: true\n },\n currentTech: {\n get: () => this.tech_.name_,\n enumerable: true\n },\n duration: {\n get: () => this.tech_.duration(),\n enumerable: true\n },\n main: {\n get: () => this.playlists.main,\n enumerable: true\n },\n playerDimensions: {\n get: () => this.tech_.currentDimensions(),\n enumerable: true\n },\n seekable: {\n get: () => timeRangesToArray(this.tech_.seekable()),\n enumerable: true\n },\n timestamp: {\n get: () => Date.now(),\n enumerable: true\n },\n videoPlaybackQuality: {\n get: () => this.tech_.getVideoPlaybackQuality(),\n enumerable: true\n }\n });\n this.tech_.one('canplay', this.playlistController_.setupFirstPlay.bind(this.playlistController_));\n this.tech_.on('bandwidthupdate', () => {\n if (this.options_.useBandwidthFromLocalStorage) {\n updateVhsLocalStorage({\n bandwidth: this.bandwidth,\n throughput: Math.round(this.throughput)\n });\n }\n });\n 
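// Worked example for the systemBandwidth getter defined above (illustrative
// values): with bandwidth = 5e6 bits/s and throughput = 20e6 bits/s,
// systemBandwidth = 1 / (1 / 5e6 + 1 / 20e6) = 4e6 bits/s. Because the two
// stages are serial, the combined rate always lands below the slower stage.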
this.playlistController_.on('selectedinitialmedia', () => {\n // Add the manual rendition mix-in to VhsHandler\n renditionSelectionMixin(this);\n });\n this.playlistController_.sourceUpdater_.on('createdsourcebuffers', () => {\n this.setupEme_();\n }); // the bandwidth of the primary segment loader is our best\n // estimate of overall bandwidth\n\n this.on(this.playlistController_, 'progress', function () {\n this.tech_.trigger('progress');\n }); // In the live case, we need to ignore the very first `seeking` event since\n // that will be the result of the seek-to-live behavior\n\n this.on(this.playlistController_, 'firstplay', function () {\n this.ignoreNextSeekingEvent_ = true;\n });\n this.setupQualityLevels_(); // do nothing if the tech has been disposed already\n // this can occur if someone sets the src in player.ready(), for instance\n\n if (!this.tech_.el()) {\n return;\n }\n this.mediaSourceUrl_ = window$1.URL.createObjectURL(this.playlistController_.mediaSource);\n this.tech_.src(this.mediaSourceUrl_);\n }\n createKeySessions_() {\n const audioPlaylistLoader = this.playlistController_.mediaTypes_.AUDIO.activePlaylistLoader;\n this.logger_('waiting for EME key session creation');\n waitForKeySessionCreation({\n player: this.player_,\n sourceKeySystems: this.source_.keySystems,\n audioMedia: audioPlaylistLoader && audioPlaylistLoader.media(),\n mainPlaylists: this.playlists.main.playlists\n }).then(() => {\n this.logger_('created EME key session');\n this.playlistController_.sourceUpdater_.initializedEme();\n }).catch(err => {\n this.logger_('error while creating EME key session', err);\n this.player_.error({\n message: 'Failed to initialize media keys for EME',\n code: 3\n });\n });\n }\n handleWaitingForKey_() {\n // If waitingforkey is fired, it's possible that the data that's necessary to retrieve\n // the key is in the manifest. 
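/*
 * Illustrative sketch (not part of the bundled source) of the retry shape
 * handleWaitingForKey_ uses below: on 'waitingforkey', simply re-run key
 * session creation; duplicate PSSH values are deduped downstream, so
 * re-running is cheap. `tech` and `createKeySessions` are hypothetical
 * stand-ins.
 */
const armWaitingForKey = (tech, createKeySessions) => {
  const handler = () => createKeySessions();
  tech.on('waitingforkey', handler);
  return () => tech.off('waitingforkey', handler); // disposer
};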
While this should've happened on initial source load, it\n // may happen again in live streams where the keys change, and the manifest info\n // reflects the update.\n //\n // Because videojs-contrib-eme compares the PSSH data we send to that of PSSH data it's\n // already requested keys for, we don't have to worry about this generating extraneous\n // requests.\n this.logger_('waitingforkey fired, attempting to create any new key sessions');\n this.createKeySessions_();\n }\n /**\n * If necessary and EME is available, sets up EME options and waits for key session\n * creation.\n *\n * This function also updates the source updater so taht it can be used, as for some\n * browsers, EME must be configured before content is appended (if appending unencrypted\n * content before encrypted content).\n */\n\n setupEme_() {\n const audioPlaylistLoader = this.playlistController_.mediaTypes_.AUDIO.activePlaylistLoader;\n const didSetupEmeOptions = setupEmeOptions({\n player: this.player_,\n sourceKeySystems: this.source_.keySystems,\n media: this.playlists.media(),\n audioMedia: audioPlaylistLoader && audioPlaylistLoader.media()\n });\n this.player_.tech_.on('keystatuschange', e => {\n this.playlistController_.updatePlaylistByKeyStatus(e.keyId, e.status);\n });\n this.handleWaitingForKey_ = this.handleWaitingForKey_.bind(this);\n this.player_.tech_.on('waitingforkey', this.handleWaitingForKey_);\n if (!didSetupEmeOptions) {\n // If EME options were not set up, we've done all we could to initialize EME.\n this.playlistController_.sourceUpdater_.initializedEme();\n return;\n }\n this.createKeySessions_();\n }\n /**\n * Initializes the quality levels and sets listeners to update them.\n *\n * @method setupQualityLevels_\n * @private\n */\n\n setupQualityLevels_() {\n const player = videojs.players[this.tech_.options_.playerId]; // if there isn't a player or there isn't a qualityLevels plugin\n // or qualityLevels_ listeners have already been setup, do nothing.\n\n if (!player || !player.qualityLevels || this.qualityLevels_) {\n return;\n }\n this.qualityLevels_ = player.qualityLevels();\n this.playlistController_.on('selectedinitialmedia', () => {\n handleVhsLoadedMetadata(this.qualityLevels_, this);\n });\n this.playlists.on('mediachange', () => {\n handleVhsMediaChange(this.qualityLevels_, this.playlists);\n });\n }\n /**\n * return the version\n */\n\n static version() {\n return {\n '@videojs/http-streaming': version$4,\n 'mux.js': version$3,\n 'mpd-parser': version$2,\n 'm3u8-parser': version$1,\n 'aes-decrypter': version\n };\n }\n /**\n * return the version\n */\n\n version() {\n return this.constructor.version();\n }\n canChangeType() {\n return SourceUpdater.canChangeType();\n }\n /**\n * Begin playing the video.\n */\n\n play() {\n this.playlistController_.play();\n }\n /**\n * a wrapper around the function in PlaylistController\n */\n\n setCurrentTime(currentTime) {\n this.playlistController_.setCurrentTime(currentTime);\n }\n /**\n * a wrapper around the function in PlaylistController\n */\n\n duration() {\n return this.playlistController_.duration();\n }\n /**\n * a wrapper around the function in PlaylistController\n */\n\n seekable() {\n return this.playlistController_.seekable();\n }\n /**\n * Abort all outstanding work and cleanup.\n */\n\n dispose() {\n if (this.playbackWatcher_) {\n this.playbackWatcher_.dispose();\n }\n if (this.playlistController_) {\n this.playlistController_.dispose();\n }\n if (this.qualityLevels_) {\n this.qualityLevels_.dispose();\n }\n if (this.tech_ && 
this.tech_.vhs) {\n delete this.tech_.vhs;\n }\n if (this.mediaSourceUrl_ && window$1.URL.revokeObjectURL) {\n window$1.URL.revokeObjectURL(this.mediaSourceUrl_);\n this.mediaSourceUrl_ = null;\n }\n if (this.tech_) {\n this.tech_.off('waitingforkey', this.handleWaitingForKey_);\n }\n super.dispose();\n }\n convertToProgramTime(time, callback) {\n return getProgramTime({\n playlist: this.playlistController_.media(),\n time,\n callback\n });\n } // the player must be playing before calling this\n\n seekToProgramTime(programTime, callback, pauseAfterSeek = true, retryCount = 2) {\n return seekToProgramTime({\n programTime,\n playlist: this.playlistController_.media(),\n retryCount,\n pauseAfterSeek,\n seekTo: this.options_.seekTo,\n tech: this.options_.tech,\n callback\n });\n }\n /**\n * Adds the onRequest, onResponse, offRequest and offResponse functions\n * to the VhsHandler xhr Object.\n */\n\n setupXhrHooks_() {\n /**\n * A player function for setting an onRequest hook\n *\n * @param {function} callback for request modifiction\n */\n this.xhr.onRequest = callback => {\n addOnRequestHook(this.xhr, callback);\n };\n /**\n * A player function for setting an onResponse hook\n *\n * @param {callback} callback for response data retrieval\n */\n\n this.xhr.onResponse = callback => {\n addOnResponseHook(this.xhr, callback);\n };\n /**\n * Deletes a player onRequest callback if it exists\n *\n * @param {function} callback to delete from the player set\n */\n\n this.xhr.offRequest = callback => {\n removeOnRequestHook(this.xhr, callback);\n };\n /**\n * Deletes a player onResponse callback if it exists\n *\n * @param {function} callback to delete from the player set\n */\n\n this.xhr.offResponse = callback => {\n removeOnResponseHook(this.xhr, callback);\n }; // Trigger an event on the player to notify the user that vhs is ready to set xhr hooks.\n // This allows hooks to be set before the source is set to vhs when handleSource is called.\n\n this.player_.trigger('xhr-hooks-ready');\n }\n attachStreamingEventListeners_() {\n const playlistControllerEvents = ['seekablerangeschanged', 'bufferedrangeschanged', 'contentsteeringloadstart', 'contentsteeringloadcomplete', 'contentsteeringparsed'];\n const playbackWatcher = ['gapjumped', 'playedrangeschanged']; // re-emit streaming events and payloads on the player.\n\n playlistControllerEvents.forEach(eventName => {\n this.playlistController_.on(eventName, metadata => {\n this.player_.trigger(_extends({}, metadata));\n });\n });\n playbackWatcher.forEach(eventName => {\n this.playbackWatcher_.on(eventName, metadata => {\n this.player_.trigger(_extends({}, metadata));\n });\n });\n }\n}\n/**\n * The Source Handler object, which informs video.js what additional\n * MIME types are supported and sets up playback. It is registered\n * automatically to the appropriate tech based on the capabilities of\n * the browser it is running in. 
It is not necessary to use or modify\n * this object in normal usage.\n */\n\nconst VhsSourceHandler = {\n name: 'videojs-http-streaming',\n VERSION: version$4,\n canHandleSource(srcObj, options = {}) {\n const localOptions = merge(videojs.options, options);\n return VhsSourceHandler.canPlayType(srcObj.type, localOptions);\n },\n handleSource(source, tech, options = {}) {\n const localOptions = merge(videojs.options, options);\n tech.vhs = new VhsHandler(source, tech, localOptions);\n tech.vhs.xhr = xhrFactory();\n tech.vhs.setupXhrHooks_();\n tech.vhs.src(source.src, source.type);\n return tech.vhs;\n },\n canPlayType(type, options) {\n const simpleType = simpleTypeFromSourceType(type);\n if (!simpleType) {\n return '';\n }\n const overrideNative = VhsSourceHandler.getOverrideNative(options);\n const supportsTypeNatively = Vhs.supportsTypeNatively(simpleType);\n const canUseMsePlayback = !supportsTypeNatively || overrideNative;\n return canUseMsePlayback ? 'maybe' : '';\n },\n getOverrideNative(options = {}) {\n const {\n vhs = {}\n } = options;\n const defaultOverrideNative = !(videojs.browser.IS_ANY_SAFARI || videojs.browser.IS_IOS);\n const {\n overrideNative = defaultOverrideNative\n } = vhs;\n return overrideNative;\n }\n};\n/**\n * Check to see if the native MediaSource object exists and supports\n * an MP4 container with both H.264 video and AAC-LC audio.\n *\n * @return {boolean} if native media sources are supported\n */\n\nconst supportsNativeMediaSources = () => {\n return browserSupportsCodec('avc1.4d400d,mp4a.40.2');\n}; // register source handlers with the appropriate techs\n\nif (supportsNativeMediaSources()) {\n videojs.getTech('Html5').registerSourceHandler(VhsSourceHandler, 0);\n}\nvideojs.VhsHandler = VhsHandler;\nvideojs.VhsSourceHandler = VhsSourceHandler;\nvideojs.Vhs = Vhs;\nif (!videojs.use) {\n videojs.registerComponent('Vhs', Vhs);\n}\nvideojs.options.vhs = videojs.options.vhs || {};\nif (!videojs.getPlugin || !videojs.getPlugin('reloadSourceOnError')) {\n videojs.registerPlugin('reloadSourceOnError', reloadSourceOnError);\n}\n\nexport { videojs as default };\n", "function _extends() {\n return _extends = Object.assign ? Object.assign.bind() : function (n) {\n for (var e = 1; e < arguments.length; e++) {\n var t = arguments[e];\n for (var r in t) ({}).hasOwnProperty.call(t, r) && (n[r] = t[r]);\n }\n return n;\n }, _extends.apply(null, arguments);\n}\nexport { _extends as default };", "import URLToolkit from 'url-toolkit';\nimport window from 'global/window';\nvar DEFAULT_LOCATION = 'http://example.com';\n\nvar resolveUrl = function resolveUrl(baseUrl, relativeUrl) {\n // return early if we don't need to resolve\n if (/^[a-z]+:/i.test(relativeUrl)) {\n return relativeUrl;\n } // if baseUrl is a data URI, ignore it and resolve everything relative to window.location\n\n\n if (/^data:/.test(baseUrl)) {\n baseUrl = window.location && window.location.href || '';\n } // IE11 supports URL but not the URL constructor\n // feature detect the behavior we want\n\n\n var nativeURL = typeof window.URL === 'function';\n var protocolLess = /^\\/\\//.test(baseUrl); // remove location if window.location isn't available (i.e. 
we're in node)\n // and if baseUrl isn't an absolute url\n\n var removeLocation = !window.location && !/\\/\\//i.test(baseUrl); // if the base URL is relative then combine with the current location\n\n if (nativeURL) {\n baseUrl = new window.URL(baseUrl, window.location || DEFAULT_LOCATION);\n } else if (!/\\/\\//i.test(baseUrl)) {\n baseUrl = URLToolkit.buildAbsoluteURL(window.location && window.location.href || '', baseUrl);\n }\n\n if (nativeURL) {\n var newUrl = new URL(relativeUrl, baseUrl); // if we're a protocol-less url, remove the protocol\n // and if we're location-less, remove the location\n // otherwise, return the url unmodified\n\n if (removeLocation) {\n return newUrl.href.slice(DEFAULT_LOCATION.length);\n } else if (protocolLess) {\n return newUrl.href.slice(newUrl.protocol.length);\n }\n\n return newUrl.href;\n }\n\n return URLToolkit.buildAbsoluteURL(baseUrl, relativeUrl);\n};\n\nexport default resolveUrl;", "/**\n * @file stream.js\n */\n\n/**\n * A lightweight readable stream implemention that handles event dispatching.\n *\n * @class Stream\n */\nvar Stream = /*#__PURE__*/function () {\n function Stream() {\n this.listeners = {};\n }\n /**\n * Add a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener the callback to be invoked when an event of\n * the specified type occurs\n */\n\n\n var _proto = Stream.prototype;\n\n _proto.on = function on(type, listener) {\n if (!this.listeners[type]) {\n this.listeners[type] = [];\n }\n\n this.listeners[type].push(listener);\n }\n /**\n * Remove a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener a function previously registered for this\n * type of event through `on`\n * @return {boolean} if we could turn it off or not\n */\n ;\n\n _proto.off = function off(type, listener) {\n if (!this.listeners[type]) {\n return false;\n }\n\n var index = this.listeners[type].indexOf(listener); // TODO: which is better?\n // In Video.js we slice listener functions\n // on trigger so that it does not mess up the order\n // while we loop through.\n //\n // Here we slice on off so that the loop in trigger\n // can continue using it's old reference to loop without\n // messing up the order.\n\n this.listeners[type] = this.listeners[type].slice(0);\n this.listeners[type].splice(index, 1);\n return index > -1;\n }\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n *\n * @param {string} type the event name\n */\n ;\n\n _proto.trigger = function trigger(type) {\n var callbacks = this.listeners[type];\n\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n\n if (arguments.length === 2) {\n var length = callbacks.length;\n\n for (var i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n var args = Array.prototype.slice.call(arguments, 1);\n var _length = callbacks.length;\n\n for (var _i = 0; _i < _length; ++_i) {\n callbacks[_i].apply(this, args);\n }\n }\n }\n /**\n * Destroys the stream and cleans up.\n */\n ;\n\n _proto.dispose = function dispose() {\n this.listeners = {};\n }\n /**\n * Forwards all `data` events on this stream to the destination stream. 
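Editor's note: the `Stream` base class above (`on`/`off`/`trigger`, plus `pipe` just below) is the event backbone the parsers in this bundle build on. A self-contained sketch of its contract, using only the methods defined here:

```js
import Stream from '@videojs/vhs-utils/es/stream.js';

const source = new Stream();
// pipe() only requires the destination to expose push(data)
const sink = { push(data) { console.log('received', data); } };

source.pipe(sink);            // forwards every 'data' event to sink.push
source.trigger('data', 'hi'); // logs: received hi

const listener = (d) => console.log('direct', d);
source.on('data', listener);
source.off('data', listener); // true -- the listener was found and removed
```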
The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n *\n * @param {Stream} destination the stream that will receive all `data` events\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */\n ;\n\n _proto.pipe = function pipe(destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n };\n\n return Stream;\n}();\n\nexport { Stream as default };", "import window from 'global/window';\n\nvar atob = function atob(s) {\n return window.atob ? window.atob(s) : Buffer.from(s, 'base64').toString('binary');\n};\n\nexport default function decodeB64ToUint8Array(b64Text) {\n var decodedString = atob(b64Text);\n var array = new Uint8Array(decodedString.length);\n\n for (var i = 0; i < decodedString.length; i++) {\n array[i] = decodedString.charCodeAt(i);\n }\n\n return array;\n}", "/*! @name m3u8-parser @version 7.2.0 @license Apache-2.0 */\nimport Stream from '@videojs/vhs-utils/es/stream.js';\nimport _extends from '@babel/runtime/helpers/extends';\nimport decodeB64ToUint8Array from '@videojs/vhs-utils/es/decode-b64-to-uint8-array.js';\n\n/**\n * @file m3u8/line-stream.js\n */\n/**\n * A stream that buffers string input and generates a `data` event for each\n * line.\n *\n * @class LineStream\n * @extends Stream\n */\n\nclass LineStream extends Stream {\n constructor() {\n super();\n this.buffer = '';\n }\n /**\n * Add new data to be parsed.\n *\n * @param {string} data the text to process\n */\n\n\n push(data) {\n let nextNewline;\n this.buffer += data;\n nextNewline = this.buffer.indexOf('\\n');\n\n for (; nextNewline > -1; nextNewline = this.buffer.indexOf('\\n')) {\n this.trigger('data', this.buffer.substring(0, nextNewline));\n this.buffer = this.buffer.substring(nextNewline + 1);\n }\n }\n\n}\n\nconst TAB = String.fromCharCode(0x09);\n\nconst parseByterange = function (byterangeString) {\n // optionally match and capture 0+ digits before `@`\n // optionally match and capture 0+ digits after `@`\n const match = /([0-9.]*)?@?([0-9.]*)?/.exec(byterangeString || '');\n const result = {};\n\n if (match[1]) {\n result.length = parseInt(match[1], 10);\n }\n\n if (match[2]) {\n result.offset = parseInt(match[2], 10);\n }\n\n return result;\n};\n/**\n * \"forgiving\" attribute list psuedo-grammar:\n * attributes -> keyvalue (',' keyvalue)*\n * keyvalue -> key '=' value\n * key -> [^=]*\n * value -> '\"' [^\"]* '\"' | [^,]*\n */\n\n\nconst attributeSeparator = function () {\n const key = '[^=]*';\n const value = '\"[^\"]*\"|[^,]*';\n const keyvalue = '(?:' + key + ')=(?:' + value + ')';\n return new RegExp('(?:^|,)(' + keyvalue + ')');\n};\n/**\n * Parse attributes from a line given the separator\n *\n * @param {string} attributes the attribute line to parse\n */\n\n\nconst parseAttributes = function (attributes) {\n const result = {};\n\n if (!attributes) {\n return result;\n } // split the string using attributes as the separator\n\n\n const attrs = attributes.split(attributeSeparator());\n let i = attrs.length;\n let attr;\n\n while (i--) {\n // filter out unmatched portions of the string\n if (attrs[i] === '') {\n continue;\n } // split the key and value\n\n\n attr = /([^=]*)=(.*)/.exec(attrs[i]).slice(1); // trim whitespace and remove optional quotes around the value\n\n attr[0] = attr[0].replace(/^\\s+|\\s+$/g, '');\n attr[1] = attr[1].replace(/^\\s+|\\s+$/g, '');\n attr[1] = attr[1].replace(/^['\"](.*)['\"]$/g, '$1');\n result[attr[0]] = attr[1];\n }\n\n return result;\n};\n/**\n * 
Converts a string into a resolution object\n *\n * @param {string} resolution a string such as 3840x2160\n *\n * @return {Object} An object representing the resolution\n *\n */\n\n\nconst parseResolution = resolution => {\n const split = resolution.split('x');\n const result = {};\n\n if (split[0]) {\n result.width = parseInt(split[0], 10);\n }\n\n if (split[1]) {\n result.height = parseInt(split[1], 10);\n }\n\n return result;\n};\n/**\n * A line-level M3U8 parser event stream. It expects to receive input one\n * line at a time and performs a context-free parse of its contents. A stream\n * interpretation of a manifest can be useful if the manifest is expected to\n * be too large to fit comfortably into memory or the entirety of the input\n * is not immediately available. Otherwise, it's probably much easier to work\n * with a regular `Parser` object.\n *\n * Produces `data` events with an object that captures the parser's\n * interpretation of the input. That object has a property `tag` that is one\n * of `uri`, `comment`, or `tag`. URIs only have a single additional\n * property, `line`, which captures the entirety of the input without\n * interpretation. Comments similarly have a single additional property\n * `text` which is the input without the leading `#`.\n *\n * Tags always have a property `tagType` which is the lower-cased version of\n * the M3U8 directive without the `#EXT` or `#EXT-X-` prefix. For instance,\n * `#EXT-X-MEDIA-SEQUENCE` becomes `media-sequence` when parsed. Unrecognized\n * tags are given the tag type `unknown` and a single additional property\n * `data` with the remainder of the input.\n *\n * @class ParseStream\n * @extends Stream\n */\n\n\nclass ParseStream extends Stream {\n constructor() {\n super();\n this.customParsers = [];\n this.tagMappers = [];\n }\n /**\n * Parses an additional line of input.\n *\n * @param {string} line a single line of an M3U8 file to parse\n */\n\n\n push(line) {\n let match;\n let event; // strip whitespace\n\n line = line.trim();\n\n if (line.length === 0) {\n // ignore empty lines\n return;\n } // URIs\n\n\n if (line[0] !== '#') {\n this.trigger('data', {\n type: 'uri',\n uri: line\n });\n return;\n } // map tags\n\n\n const newLines = this.tagMappers.reduce((acc, mapper) => {\n const mappedLine = mapper(line); // skip if unchanged\n\n if (mappedLine === line) {\n return acc;\n }\n\n return acc.concat([mappedLine]);\n }, [line]);\n newLines.forEach(newLine => {\n for (let i = 0; i < this.customParsers.length; i++) {\n if (this.customParsers[i].call(this, newLine)) {\n return;\n }\n } // Comments\n\n\n if (newLine.indexOf('#EXT') !== 0) {\n this.trigger('data', {\n type: 'comment',\n text: newLine.slice(1)\n });\n return;\n } // strip off any carriage returns here so the regex matching\n // doesn't have to account for them.\n\n\n newLine = newLine.replace('\\r', ''); // Tags\n\n match = /^#EXTM3U/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'm3u'\n });\n return;\n }\n\n match = /^#EXTINF:([0-9\\.]*)?,?(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'inf'\n };\n\n if (match[1]) {\n event.duration = parseFloat(match[1]);\n }\n\n if (match[2]) {\n event.title = match[2];\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-TARGETDURATION:([0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'targetduration'\n };\n\n if (match[1]) {\n event.duration = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n 
return;\n }\n\n match = /^#EXT-X-VERSION:([0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'version'\n };\n\n if (match[1]) {\n event.version = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-MEDIA-SEQUENCE:(\\-?[0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'media-sequence'\n };\n\n if (match[1]) {\n event.number = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-DISCONTINUITY-SEQUENCE:(\\-?[0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'discontinuity-sequence'\n };\n\n if (match[1]) {\n event.number = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PLAYLIST-TYPE:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'playlist-type'\n };\n\n if (match[1]) {\n event.playlistType = match[1];\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-BYTERANGE:(.*)?$/.exec(newLine);\n\n if (match) {\n event = _extends(parseByterange(match[1]), {\n type: 'tag',\n tagType: 'byterange'\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-ALLOW-CACHE:(YES|NO)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'allow-cache'\n };\n\n if (match[1]) {\n event.allowed = !/NO/.test(match[1]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-MAP:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'map'\n };\n\n if (match[1]) {\n const attributes = parseAttributes(match[1]);\n\n if (attributes.URI) {\n event.uri = attributes.URI;\n }\n\n if (attributes.BYTERANGE) {\n event.byterange = parseByterange(attributes.BYTERANGE);\n }\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-STREAM-INF:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'stream-inf'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]);\n\n if (event.attributes.RESOLUTION) {\n event.attributes.RESOLUTION = parseResolution(event.attributes.RESOLUTION);\n }\n\n if (event.attributes.BANDWIDTH) {\n event.attributes.BANDWIDTH = parseInt(event.attributes.BANDWIDTH, 10);\n }\n\n if (event.attributes['FRAME-RATE']) {\n event.attributes['FRAME-RATE'] = parseFloat(event.attributes['FRAME-RATE']);\n }\n\n if (event.attributes['PROGRAM-ID']) {\n event.attributes['PROGRAM-ID'] = parseInt(event.attributes['PROGRAM-ID'], 10);\n }\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-MEDIA:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'media'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-ENDLIST/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'endlist'\n });\n return;\n }\n\n match = /^#EXT-X-DISCONTINUITY/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'discontinuity'\n });\n return;\n }\n\n match = /^#EXT-X-PROGRAM-DATE-TIME:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'program-date-time'\n };\n\n if (match[1]) {\n event.dateTimeString = match[1];\n event.dateTimeObject = new Date(match[1]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-KEY:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'key'\n };\n\n if (match[1]) {\n 
event.attributes = parseAttributes(match[1]); // parse the IV string into a Uint32Array\n\n if (event.attributes.IV) {\n if (event.attributes.IV.substring(0, 2).toLowerCase() === '0x') {\n event.attributes.IV = event.attributes.IV.substring(2);\n }\n\n event.attributes.IV = event.attributes.IV.match(/.{8}/g);\n event.attributes.IV[0] = parseInt(event.attributes.IV[0], 16);\n event.attributes.IV[1] = parseInt(event.attributes.IV[1], 16);\n event.attributes.IV[2] = parseInt(event.attributes.IV[2], 16);\n event.attributes.IV[3] = parseInt(event.attributes.IV[3], 16);\n event.attributes.IV = new Uint32Array(event.attributes.IV);\n }\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-START:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'start'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]);\n event.attributes['TIME-OFFSET'] = parseFloat(event.attributes['TIME-OFFSET']);\n event.attributes.PRECISE = /YES/.test(event.attributes.PRECISE);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-CUE-OUT-CONT:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'cue-out-cont'\n };\n\n if (match[1]) {\n event.data = match[1];\n } else {\n event.data = '';\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-CUE-OUT:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'cue-out'\n };\n\n if (match[1]) {\n event.data = match[1];\n } else {\n event.data = '';\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-CUE-IN:?(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'cue-in'\n };\n\n if (match[1]) {\n event.data = match[1];\n } else {\n event.data = '';\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-SKIP:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'skip'\n };\n event.attributes = parseAttributes(match[1]);\n\n if (event.attributes.hasOwnProperty('SKIPPED-SEGMENTS')) {\n event.attributes['SKIPPED-SEGMENTS'] = parseInt(event.attributes['SKIPPED-SEGMENTS'], 10);\n }\n\n if (event.attributes.hasOwnProperty('RECENTLY-REMOVED-DATERANGES')) {\n event.attributes['RECENTLY-REMOVED-DATERANGES'] = event.attributes['RECENTLY-REMOVED-DATERANGES'].split(TAB);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PART:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'part'\n };\n event.attributes = parseAttributes(match[1]);\n ['DURATION'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n ['INDEPENDENT', 'GAP'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = /YES/.test(event.attributes[key]);\n }\n });\n\n if (event.attributes.hasOwnProperty('BYTERANGE')) {\n event.attributes.byterange = parseByterange(event.attributes.BYTERANGE);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-SERVER-CONTROL:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'server-control'\n };\n event.attributes = parseAttributes(match[1]);\n ['CAN-SKIP-UNTIL', 'PART-HOLD-BACK', 'HOLD-BACK'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n ['CAN-SKIP-DATERANGES', 'CAN-BLOCK-RELOAD'].forEach(function (key) {\n if 
(event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = /YES/.test(event.attributes[key]);\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PART-INF:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'part-inf'\n };\n event.attributes = parseAttributes(match[1]);\n ['PART-TARGET'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PRELOAD-HINT:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'preload-hint'\n };\n event.attributes = parseAttributes(match[1]);\n ['BYTERANGE-START', 'BYTERANGE-LENGTH'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseInt(event.attributes[key], 10);\n const subkey = key === 'BYTERANGE-LENGTH' ? 'length' : 'offset';\n event.attributes.byterange = event.attributes.byterange || {};\n event.attributes.byterange[subkey] = event.attributes[key]; // only keep the parsed byterange object.\n\n delete event.attributes[key];\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-RENDITION-REPORT:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'rendition-report'\n };\n event.attributes = parseAttributes(match[1]);\n ['LAST-MSN', 'LAST-PART'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseInt(event.attributes[key], 10);\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-DATERANGE:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'daterange'\n };\n event.attributes = parseAttributes(match[1]);\n ['ID', 'CLASS'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = String(event.attributes[key]);\n }\n });\n ['START-DATE', 'END-DATE'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = new Date(event.attributes[key]);\n }\n });\n ['DURATION', 'PLANNED-DURATION'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n ['END-ON-NEXT'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = /YES/i.test(event.attributes[key]);\n }\n });\n ['SCTE35-CMD', ' SCTE35-OUT', 'SCTE35-IN'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = event.attributes[key].toString(16);\n }\n });\n const clientAttributePattern = /^X-([A-Z]+-)+[A-Z]+$/;\n\n for (const key in event.attributes) {\n if (!clientAttributePattern.test(key)) {\n continue;\n }\n\n const isHexaDecimal = /[0-9A-Fa-f]{6}/g.test(event.attributes[key]);\n const isDecimalFloating = /^\\d+(\\.\\d+)?$/.test(event.attributes[key]);\n event.attributes[key] = isHexaDecimal ? event.attributes[key].toString(16) : isDecimalFloating ? 
parseFloat(event.attributes[key]) : String(event.attributes[key]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-INDEPENDENT-SEGMENTS/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'independent-segments'\n });\n return;\n }\n\n match = /^#EXT-X-I-FRAMES-ONLY/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'i-frames-only'\n });\n return;\n }\n\n match = /^#EXT-X-CONTENT-STEERING:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'content-steering'\n };\n event.attributes = parseAttributes(match[1]);\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-I-FRAME-STREAM-INF:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'i-frame-playlist'\n };\n event.attributes = parseAttributes(match[1]);\n\n if (event.attributes.URI) {\n event.uri = event.attributes.URI;\n }\n\n if (event.attributes.BANDWIDTH) {\n event.attributes.BANDWIDTH = parseInt(event.attributes.BANDWIDTH, 10);\n }\n\n if (event.attributes.RESOLUTION) {\n event.attributes.RESOLUTION = parseResolution(event.attributes.RESOLUTION);\n }\n\n if (event.attributes['AVERAGE-BANDWIDTH']) {\n event.attributes['AVERAGE-BANDWIDTH'] = parseInt(event.attributes['AVERAGE-BANDWIDTH'], 10);\n }\n\n if (event.attributes['FRAME-RATE']) {\n event.attributes['FRAME-RATE'] = parseFloat(event.attributes['FRAME-RATE']);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-DEFINE:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'define'\n };\n event.attributes = parseAttributes(match[1]);\n this.trigger('data', event);\n return;\n } // unknown tag type\n\n\n this.trigger('data', {\n type: 'tag',\n data: newLine.slice(4)\n });\n });\n }\n /**\n * Add a parser for custom headers\n *\n * @param {Object} options a map of options for the added parser\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {string} options.customType the custom type to register to the output\n * @param {Function} [options.dataParser] function to parse the line into an object\n * @param {boolean} [options.segment] should tag data be attached to the segment object\n */\n\n\n addParser({\n expression,\n customType,\n dataParser,\n segment\n }) {\n if (typeof dataParser !== 'function') {\n dataParser = line => line;\n }\n\n this.customParsers.push(line => {\n const match = expression.exec(line);\n\n if (match) {\n this.trigger('data', {\n type: 'custom',\n data: dataParser(line),\n customType,\n segment\n });\n return true;\n }\n });\n }\n /**\n * Add a custom header mapper\n *\n * @param {Object} options\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {Function} options.map function to translate tag into a different tag\n */\n\n\n addTagMapper({\n expression,\n map\n }) {\n const mapFn = line => {\n if (expression.test(line)) {\n return map(line);\n }\n\n return line;\n };\n\n this.tagMappers.push(mapFn);\n }\n\n}\n\nconst camelCase = str => str.toLowerCase().replace(/-(\\w)/g, a => a[1].toUpperCase());\n\nconst camelCaseKeys = function (attributes) {\n const result = {};\n Object.keys(attributes).forEach(function (key) {\n result[camelCase(key)] = attributes[key];\n });\n return result;\n}; // set SERVER-CONTROL hold back based upon targetDuration and partTargetDuration\n// we need this helper because defaults are based upon targetDuration and\n// partTargetDuration being set, but 
they may not be if SERVER-CONTROL appears before\n// target durations are set.\n\n\nconst setHoldBack = function (manifest) {\n const {\n serverControl,\n targetDuration,\n partTargetDuration\n } = manifest;\n\n if (!serverControl) {\n return;\n }\n\n const tag = '#EXT-X-SERVER-CONTROL';\n const hb = 'holdBack';\n const phb = 'partHoldBack';\n const minTargetDuration = targetDuration && targetDuration * 3;\n const minPartDuration = partTargetDuration && partTargetDuration * 2;\n\n if (targetDuration && !serverControl.hasOwnProperty(hb)) {\n serverControl[hb] = minTargetDuration;\n this.trigger('info', {\n message: `${tag} defaulting HOLD-BACK to targetDuration * 3 (${minTargetDuration}).`\n });\n }\n\n if (minTargetDuration && serverControl[hb] < minTargetDuration) {\n this.trigger('warn', {\n message: `${tag} clamping HOLD-BACK (${serverControl[hb]}) to targetDuration * 3 (${minTargetDuration})`\n });\n serverControl[hb] = minTargetDuration;\n } // default no part hold back to part target duration * 3\n\n\n if (partTargetDuration && !serverControl.hasOwnProperty(phb)) {\n serverControl[phb] = partTargetDuration * 3;\n this.trigger('info', {\n message: `${tag} defaulting PART-HOLD-BACK to partTargetDuration * 3 (${serverControl[phb]}).`\n });\n } // if part hold back is too small default it to part target duration * 2\n\n\n if (partTargetDuration && serverControl[phb] < minPartDuration) {\n this.trigger('warn', {\n message: `${tag} clamping PART-HOLD-BACK (${serverControl[phb]}) to partTargetDuration * 2 (${minPartDuration}).`\n });\n serverControl[phb] = minPartDuration;\n }\n};\n/**\n * A parser for M3U8 files. The current interpretation of the input is\n * exposed as a property `manifest` on parser objects. It's just two lines to\n * create and parse a manifest once you have the contents available as a string:\n *\n * ```js\n * var parser = new m3u8.Parser();\n * parser.push(xhr.responseText);\n * ```\n *\n * New input can later be applied to update the manifest object by calling\n * `push` again.\n *\n * The parser attempts to create a usable manifest object even if the\n * underlying input is somewhat nonsensical. It emits `info` and `warning`\n * events during the parse if it encounters input that seems invalid or\n * requires some property of the manifest object to be defaulted.\n *\n * @class Parser\n * @param {Object} [opts] Options for the constructor, needed for substitutions\n * @param {string} [opts.uri] URL to check for query params\n * @param {Object} [opts.mainDefinitions] Definitions on main playlist that can be imported\n * @extends Stream\n */\n\n\nclass Parser extends Stream {\n constructor(opts = {}) {\n super();\n this.lineStream = new LineStream();\n this.parseStream = new ParseStream();\n this.lineStream.pipe(this.parseStream);\n this.mainDefinitions = opts.mainDefinitions || {};\n this.params = new URL(opts.uri, 'https://a.com').searchParams;\n this.lastProgramDateTime = null;\n /* eslint-disable consistent-this */\n\n const self = this;\n /* eslint-enable consistent-this */\n\n const uris = [];\n let currentUri = {}; // if specified, the active EXT-X-MAP definition\n\n let currentMap; // if specified, the active decryption key\n\n let key;\n let hasParts = false;\n\n const noop = function () {};\n\n const defaultMediaGroups = {\n 'AUDIO': {},\n 'VIDEO': {},\n 'CLOSED-CAPTIONS': {},\n 'SUBTITLES': {}\n }; // This is the Widevine UUID from DASH IF IOP. 
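Editor's note: expanding the two-line docstring example above into a complete run against this `Parser` (the playlist text is a stand-in):

```js
import { Parser } from 'm3u8-parser';

const parser = new Parser();
parser.push([
  '#EXTM3U',
  '#EXT-X-TARGETDURATION:10',
  '#EXTINF:9.7,',
  'segment-0.ts',
  '#EXT-X-ENDLIST'
].join('\n'));
parser.end(); // flushes the last line, which lacks a trailing newline

parser.manifest.targetDuration;       // 10
parser.manifest.segments[0].uri;      // 'segment-0.ts'
parser.manifest.segments[0].duration; // 9.7
parser.manifest.endList;              // true
```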
The same exact string is\n // used in MPDs with Widevine encrypted streams.\n\n const widevineUuid = 'urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed'; // group segments into numbered timelines delineated by discontinuities\n\n let currentTimeline = 0; // the manifest is empty until the parse stream begins delivering data\n\n this.manifest = {\n allowCache: true,\n discontinuityStarts: [],\n dateRanges: [],\n iFramePlaylists: [],\n segments: []\n }; // keep track of the last seen segment's byte range end, as segments are not required\n // to provide the offset, in which case it defaults to the next byte after the\n // previous segment\n\n let lastByterangeEnd = 0; // keep track of the last seen part's byte range end.\n\n let lastPartByterangeEnd = 0;\n const dateRangeTags = {};\n this.on('end', () => {\n // only add preloadSegment if we don't yet have a uri for it.\n // and we actually have parts/preloadHints\n if (currentUri.uri || !currentUri.parts && !currentUri.preloadHints) {\n return;\n }\n\n if (!currentUri.map && currentMap) {\n currentUri.map = currentMap;\n }\n\n if (!currentUri.key && key) {\n currentUri.key = key;\n }\n\n if (!currentUri.timeline && typeof currentTimeline === 'number') {\n currentUri.timeline = currentTimeline;\n }\n\n this.manifest.preloadSegment = currentUri;\n }); // update the manifest with the m3u8 entry from the parse stream\n\n this.parseStream.on('data', function (entry) {\n let mediaGroup;\n let rendition; // Replace variables in uris and attributes as defined in #EXT-X-DEFINE tags\n\n if (self.manifest.definitions) {\n for (const def in self.manifest.definitions) {\n if (entry.uri) {\n entry.uri = entry.uri.replace(`{$${def}}`, self.manifest.definitions[def]);\n }\n\n if (entry.attributes) {\n for (const attr in entry.attributes) {\n if (typeof entry.attributes[attr] === 'string') {\n entry.attributes[attr] = entry.attributes[attr].replace(`{$${def}}`, self.manifest.definitions[def]);\n }\n }\n }\n }\n }\n\n ({\n tag() {\n // switch based on the tag type\n (({\n version() {\n if (entry.version) {\n this.manifest.version = entry.version;\n }\n },\n\n 'allow-cache'() {\n this.manifest.allowCache = entry.allowed;\n\n if (!('allowed' in entry)) {\n this.trigger('info', {\n message: 'defaulting allowCache to YES'\n });\n this.manifest.allowCache = true;\n }\n },\n\n byterange() {\n const byterange = {};\n\n if ('length' in entry) {\n currentUri.byterange = byterange;\n byterange.length = entry.length;\n\n if (!('offset' in entry)) {\n /*\n * From the latest spec (as of this writing):\n * https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.2.2\n *\n * Same text since EXT-X-BYTERANGE's introduction in draft 7:\n * https://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.1)\n *\n * \"If o [offset] is not present, the sub-range begins at the next byte\n * following the sub-range of the previous media segment.\"\n */\n entry.offset = lastByterangeEnd;\n }\n }\n\n if ('offset' in entry) {\n currentUri.byterange = byterange;\n byterange.offset = entry.offset;\n }\n\n lastByterangeEnd = byterange.offset + byterange.length;\n },\n\n endlist() {\n this.manifest.endList = true;\n },\n\n inf() {\n if (!('mediaSequence' in this.manifest)) {\n this.manifest.mediaSequence = 0;\n this.trigger('info', {\n message: 'defaulting media sequence to zero'\n });\n }\n\n if (!('discontinuitySequence' in this.manifest)) {\n this.manifest.discontinuitySequence = 0;\n this.trigger('info', {\n message: 'defaulting discontinuity sequence to 
zero'\n });\n }\n\n if (entry.title) {\n currentUri.title = entry.title;\n }\n\n if (entry.duration > 0) {\n currentUri.duration = entry.duration;\n }\n\n if (entry.duration === 0) {\n currentUri.duration = 0.01;\n this.trigger('info', {\n message: 'updating zero segment duration to a small value'\n });\n }\n\n this.manifest.segments = uris;\n },\n\n key() {\n if (!entry.attributes) {\n this.trigger('warn', {\n message: 'ignoring key declaration without attribute list'\n });\n return;\n } // clear the active encryption key\n\n\n if (entry.attributes.METHOD === 'NONE') {\n key = null;\n return;\n }\n\n if (!entry.attributes.URI) {\n this.trigger('warn', {\n message: 'ignoring key declaration without URI'\n });\n return;\n }\n\n if (entry.attributes.KEYFORMAT === 'com.apple.streamingkeydelivery') {\n this.manifest.contentProtection = this.manifest.contentProtection || {}; // TODO: add full support for this.\n\n this.manifest.contentProtection['com.apple.fps.1_0'] = {\n attributes: entry.attributes\n };\n return;\n }\n\n if (entry.attributes.KEYFORMAT === 'com.microsoft.playready') {\n this.manifest.contentProtection = this.manifest.contentProtection || {}; // TODO: add full support for this.\n\n this.manifest.contentProtection['com.microsoft.playready'] = {\n uri: entry.attributes.URI\n };\n return;\n } // check if the content is encrypted for Widevine\n // Widevine/HLS spec: https://storage.googleapis.com/wvdocs/Widevine_DRM_HLS.pdf\n\n\n if (entry.attributes.KEYFORMAT === widevineUuid) {\n const VALID_METHODS = ['SAMPLE-AES', 'SAMPLE-AES-CTR', 'SAMPLE-AES-CENC'];\n\n if (VALID_METHODS.indexOf(entry.attributes.METHOD) === -1) {\n this.trigger('warn', {\n message: 'invalid key method provided for Widevine'\n });\n return;\n }\n\n if (entry.attributes.METHOD === 'SAMPLE-AES-CENC') {\n this.trigger('warn', {\n message: 'SAMPLE-AES-CENC is deprecated, please use SAMPLE-AES-CTR instead'\n });\n }\n\n if (entry.attributes.URI.substring(0, 23) !== 'data:text/plain;base64,') {\n this.trigger('warn', {\n message: 'invalid key URI provided for Widevine'\n });\n return;\n }\n\n if (!(entry.attributes.KEYID && entry.attributes.KEYID.substring(0, 2) === '0x')) {\n this.trigger('warn', {\n message: 'invalid key ID provided for Widevine'\n });\n return;\n } // if Widevine key attributes are valid, store them as `contentProtection`\n // on the manifest to emulate Widevine tag structure in a DASH mpd\n\n\n this.manifest.contentProtection = this.manifest.contentProtection || {};\n this.manifest.contentProtection['com.widevine.alpha'] = {\n attributes: {\n schemeIdUri: entry.attributes.KEYFORMAT,\n // remove '0x' from the key id string\n keyId: entry.attributes.KEYID.substring(2)\n },\n // decode the base64-encoded PSSH box\n pssh: decodeB64ToUint8Array(entry.attributes.URI.split(',')[1])\n };\n return;\n }\n\n if (!entry.attributes.METHOD) {\n this.trigger('warn', {\n message: 'defaulting key method to AES-128'\n });\n } // setup an encryption key for upcoming segments\n\n\n key = {\n method: entry.attributes.METHOD || 'AES-128',\n uri: entry.attributes.URI\n };\n\n if (typeof entry.attributes.IV !== 'undefined') {\n key.iv = entry.attributes.IV;\n }\n },\n\n 'media-sequence'() {\n if (!isFinite(entry.number)) {\n this.trigger('warn', {\n message: 'ignoring invalid media sequence: ' + entry.number\n });\n return;\n }\n\n this.manifest.mediaSequence = entry.number;\n },\n\n 'discontinuity-sequence'() {\n if (!isFinite(entry.number)) {\n this.trigger('warn', {\n message: 'ignoring invalid discontinuity 
sequence: ' + entry.number\n });\n return;\n }\n\n this.manifest.discontinuitySequence = entry.number;\n currentTimeline = entry.number;\n },\n\n 'playlist-type'() {\n if (!/VOD|EVENT/.test(entry.playlistType)) {\n this.trigger('warn', {\n message: 'ignoring unknown playlist type: ' + entry.playlist\n });\n return;\n }\n\n this.manifest.playlistType = entry.playlistType;\n },\n\n map() {\n currentMap = {};\n\n if (entry.uri) {\n currentMap.uri = entry.uri;\n }\n\n if (entry.byterange) {\n currentMap.byterange = entry.byterange;\n }\n\n if (key) {\n currentMap.key = key;\n }\n },\n\n 'stream-inf'() {\n this.manifest.playlists = uris;\n this.manifest.mediaGroups = this.manifest.mediaGroups || defaultMediaGroups;\n\n if (!entry.attributes) {\n this.trigger('warn', {\n message: 'ignoring empty stream-inf attributes'\n });\n return;\n }\n\n if (!currentUri.attributes) {\n currentUri.attributes = {};\n }\n\n _extends(currentUri.attributes, entry.attributes);\n },\n\n media() {\n this.manifest.mediaGroups = this.manifest.mediaGroups || defaultMediaGroups;\n\n if (!(entry.attributes && entry.attributes.TYPE && entry.attributes['GROUP-ID'] && entry.attributes.NAME)) {\n this.trigger('warn', {\n message: 'ignoring incomplete or missing media group'\n });\n return;\n } // find the media group, creating defaults as necessary\n\n\n const mediaGroupType = this.manifest.mediaGroups[entry.attributes.TYPE];\n mediaGroupType[entry.attributes['GROUP-ID']] = mediaGroupType[entry.attributes['GROUP-ID']] || {};\n mediaGroup = mediaGroupType[entry.attributes['GROUP-ID']]; // collect the rendition metadata\n\n rendition = {\n default: /yes/i.test(entry.attributes.DEFAULT)\n };\n\n if (rendition.default) {\n rendition.autoselect = true;\n } else {\n rendition.autoselect = /yes/i.test(entry.attributes.AUTOSELECT);\n }\n\n if (entry.attributes.LANGUAGE) {\n rendition.language = entry.attributes.LANGUAGE;\n }\n\n if (entry.attributes.URI) {\n rendition.uri = entry.attributes.URI;\n }\n\n if (entry.attributes['INSTREAM-ID']) {\n rendition.instreamId = entry.attributes['INSTREAM-ID'];\n }\n\n if (entry.attributes.CHARACTERISTICS) {\n rendition.characteristics = entry.attributes.CHARACTERISTICS;\n }\n\n if (entry.attributes.FORCED) {\n rendition.forced = /yes/i.test(entry.attributes.FORCED);\n } // insert the new rendition\n\n\n mediaGroup[entry.attributes.NAME] = rendition;\n },\n\n discontinuity() {\n currentTimeline += 1;\n currentUri.discontinuity = true;\n this.manifest.discontinuityStarts.push(uris.length);\n },\n\n 'program-date-time'() {\n if (typeof this.manifest.dateTimeString === 'undefined') {\n // PROGRAM-DATE-TIME is a media-segment tag, but for backwards\n // compatibility, we add the first occurence of the PROGRAM-DATE-TIME tag\n // to the manifest object\n // TODO: Consider removing this in future major version\n this.manifest.dateTimeString = entry.dateTimeString;\n this.manifest.dateTimeObject = entry.dateTimeObject;\n }\n\n currentUri.dateTimeString = entry.dateTimeString;\n currentUri.dateTimeObject = entry.dateTimeObject;\n const {\n lastProgramDateTime\n } = this;\n this.lastProgramDateTime = new Date(entry.dateTimeString).getTime(); // We should extrapolate Program Date Time backward only during first program date time occurrence.\n // Once we have at least one program date time point, we can always extrapolate it forward using lastProgramDateTime reference.\n\n if (lastProgramDateTime === null) {\n // Extrapolate Program Date Time backward\n // Since it is first program date time occurrence 
we're assuming that\n // all this.manifest.segments have no program date time info\n this.manifest.segments.reduceRight((programDateTime, segment) => {\n segment.programDateTime = programDateTime - segment.duration * 1000;\n return segment.programDateTime;\n }, this.lastProgramDateTime);\n }\n },\n\n targetduration() {\n if (!isFinite(entry.duration) || entry.duration < 0) {\n this.trigger('warn', {\n message: 'ignoring invalid target duration: ' + entry.duration\n });\n return;\n }\n\n this.manifest.targetDuration = entry.duration;\n setHoldBack.call(this, this.manifest);\n },\n\n start() {\n if (!entry.attributes || isNaN(entry.attributes['TIME-OFFSET'])) {\n this.trigger('warn', {\n message: 'ignoring start declaration without appropriate attribute list'\n });\n return;\n }\n\n this.manifest.start = {\n timeOffset: entry.attributes['TIME-OFFSET'],\n precise: entry.attributes.PRECISE\n };\n },\n\n 'cue-out'() {\n currentUri.cueOut = entry.data;\n },\n\n 'cue-out-cont'() {\n currentUri.cueOutCont = entry.data;\n },\n\n 'cue-in'() {\n currentUri.cueIn = entry.data;\n },\n\n 'skip'() {\n this.manifest.skip = camelCaseKeys(entry.attributes);\n this.warnOnMissingAttributes_('#EXT-X-SKIP', entry.attributes, ['SKIPPED-SEGMENTS']);\n },\n\n 'part'() {\n hasParts = true; // parts are always specifed before a segment\n\n const segmentIndex = this.manifest.segments.length;\n const part = camelCaseKeys(entry.attributes);\n currentUri.parts = currentUri.parts || [];\n currentUri.parts.push(part);\n\n if (part.byterange) {\n if (!part.byterange.hasOwnProperty('offset')) {\n part.byterange.offset = lastPartByterangeEnd;\n }\n\n lastPartByterangeEnd = part.byterange.offset + part.byterange.length;\n }\n\n const partIndex = currentUri.parts.length - 1;\n this.warnOnMissingAttributes_(`#EXT-X-PART #${partIndex} for segment #${segmentIndex}`, entry.attributes, ['URI', 'DURATION']);\n\n if (this.manifest.renditionReports) {\n this.manifest.renditionReports.forEach((r, i) => {\n if (!r.hasOwnProperty('lastPart')) {\n this.trigger('warn', {\n message: `#EXT-X-RENDITION-REPORT #${i} lacks required attribute(s): LAST-PART`\n });\n }\n });\n }\n },\n\n 'server-control'() {\n const attrs = this.manifest.serverControl = camelCaseKeys(entry.attributes);\n\n if (!attrs.hasOwnProperty('canBlockReload')) {\n attrs.canBlockReload = false;\n this.trigger('info', {\n message: '#EXT-X-SERVER-CONTROL defaulting CAN-BLOCK-RELOAD to false'\n });\n }\n\n setHoldBack.call(this, this.manifest);\n\n if (attrs.canSkipDateranges && !attrs.hasOwnProperty('canSkipUntil')) {\n this.trigger('warn', {\n message: '#EXT-X-SERVER-CONTROL lacks required attribute CAN-SKIP-UNTIL which is required when CAN-SKIP-DATERANGES is set'\n });\n }\n },\n\n 'preload-hint'() {\n // parts are always specifed before a segment\n const segmentIndex = this.manifest.segments.length;\n const hint = camelCaseKeys(entry.attributes);\n const isPart = hint.type && hint.type === 'PART';\n currentUri.preloadHints = currentUri.preloadHints || [];\n currentUri.preloadHints.push(hint);\n\n if (hint.byterange) {\n if (!hint.byterange.hasOwnProperty('offset')) {\n // use last part byterange end or zero if not a part.\n hint.byterange.offset = isPart ? 
lastPartByterangeEnd : 0;\n\n if (isPart) {\n lastPartByterangeEnd = hint.byterange.offset + hint.byterange.length;\n }\n }\n }\n\n const index = currentUri.preloadHints.length - 1;\n this.warnOnMissingAttributes_(`#EXT-X-PRELOAD-HINT #${index} for segment #${segmentIndex}`, entry.attributes, ['TYPE', 'URI']);\n\n if (!hint.type) {\n return;\n } // search through all preload hints except for the current one for\n // a duplicate type.\n\n\n for (let i = 0; i < currentUri.preloadHints.length - 1; i++) {\n const otherHint = currentUri.preloadHints[i];\n\n if (!otherHint.type) {\n continue;\n }\n\n if (otherHint.type === hint.type) {\n this.trigger('warn', {\n message: `#EXT-X-PRELOAD-HINT #${index} for segment #${segmentIndex} has the same TYPE ${hint.type} as preload hint #${i}`\n });\n }\n }\n },\n\n 'rendition-report'() {\n const report = camelCaseKeys(entry.attributes);\n this.manifest.renditionReports = this.manifest.renditionReports || [];\n this.manifest.renditionReports.push(report);\n const index = this.manifest.renditionReports.length - 1;\n const required = ['LAST-MSN', 'URI'];\n\n if (hasParts) {\n required.push('LAST-PART');\n }\n\n this.warnOnMissingAttributes_(`#EXT-X-RENDITION-REPORT #${index}`, entry.attributes, required);\n },\n\n 'part-inf'() {\n this.manifest.partInf = camelCaseKeys(entry.attributes);\n this.warnOnMissingAttributes_('#EXT-X-PART-INF', entry.attributes, ['PART-TARGET']);\n\n if (this.manifest.partInf.partTarget) {\n this.manifest.partTargetDuration = this.manifest.partInf.partTarget;\n }\n\n setHoldBack.call(this, this.manifest);\n },\n\n 'daterange'() {\n this.manifest.dateRanges.push(camelCaseKeys(entry.attributes));\n const index = this.manifest.dateRanges.length - 1;\n this.warnOnMissingAttributes_(`#EXT-X-DATERANGE #${index}`, entry.attributes, ['ID', 'START-DATE']);\n const dateRange = this.manifest.dateRanges[index];\n\n if (dateRange.endDate && dateRange.startDate && new Date(dateRange.endDate) < new Date(dateRange.startDate)) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE END-DATE must be equal to or later than the value of the START-DATE'\n });\n }\n\n if (dateRange.duration && dateRange.duration < 0) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE DURATION must not be negative'\n });\n }\n\n if (dateRange.plannedDuration && dateRange.plannedDuration < 0) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE PLANNED-DURATION must not be negative'\n });\n }\n\n const endOnNextYes = !!dateRange.endOnNext;\n\n if (endOnNextYes && !dateRange.class) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE with an END-ON-NEXT=YES attribute must have a CLASS attribute'\n });\n }\n\n if (endOnNextYes && (dateRange.duration || dateRange.endDate)) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE with an END-ON-NEXT=YES attribute must not contain DURATION or END-DATE attributes'\n });\n }\n\n if (dateRange.duration && dateRange.endDate) {\n const startDate = dateRange.startDate;\n const newDateInSeconds = startDate.getTime() + dateRange.duration * 1000;\n this.manifest.dateRanges[index].endDate = new Date(newDateInSeconds);\n }\n\n if (!dateRangeTags[dateRange.id]) {\n dateRangeTags[dateRange.id] = dateRange;\n } else {\n for (const attribute in dateRangeTags[dateRange.id]) {\n if (!!dateRange[attribute] && JSON.stringify(dateRangeTags[dateRange.id][attribute]) !== JSON.stringify(dateRange[attribute])) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE tags with the same ID in a playlist must have the same attributes values'\n });\n 
break;\n }\n } // if tags with the same ID do not have conflicting attributes, merge them\n\n\n const dateRangeWithSameId = this.manifest.dateRanges.findIndex(dateRangeToFind => dateRangeToFind.id === dateRange.id);\n this.manifest.dateRanges[dateRangeWithSameId] = _extends(this.manifest.dateRanges[dateRangeWithSameId], dateRange);\n dateRangeTags[dateRange.id] = _extends(dateRangeTags[dateRange.id], dateRange); // after merging, delete the duplicate dateRange that was added last\n\n this.manifest.dateRanges.pop();\n }\n },\n\n 'independent-segments'() {\n this.manifest.independentSegments = true;\n },\n\n 'i-frames-only'() {\n this.manifest.iFramesOnly = true;\n this.requiredCompatibilityversion(this.manifest.version, 4);\n },\n\n 'content-steering'() {\n this.manifest.contentSteering = camelCaseKeys(entry.attributes);\n this.warnOnMissingAttributes_('#EXT-X-CONTENT-STEERING', entry.attributes, ['SERVER-URI']);\n },\n\n /** @this {Parser} */\n define() {\n this.manifest.definitions = this.manifest.definitions || {};\n\n const addDef = (n, v) => {\n if (n in this.manifest.definitions) {\n // An EXT-X-DEFINE tag MUST NOT specify the same Variable Name as any other\n // EXT-X-DEFINE tag in the same Playlist. Parsers that encounter duplicate\n // Variable Name declarations MUST fail to parse the Playlist.\n this.trigger('error', {\n message: `EXT-X-DEFINE: Duplicate name ${n}`\n });\n return;\n }\n\n this.manifest.definitions[n] = v;\n };\n\n if ('QUERYPARAM' in entry.attributes) {\n if ('NAME' in entry.attributes || 'IMPORT' in entry.attributes) {\n // An EXT-X-DEFINE tag MUST contain either a NAME, an IMPORT, or a\n // QUERYPARAM attribute, but only one of the three. Otherwise, the\n // client MUST fail to parse the Playlist.\n this.trigger('error', {\n message: 'EXT-X-DEFINE: Invalid attributes'\n });\n return;\n }\n\n const val = this.params.get(entry.attributes.QUERYPARAM);\n\n if (!val) {\n // If the QUERYPARAM attribute value does not match any query parameter in\n // the URI or the matching parameter has no associated value, the parser\n // MUST fail to parse the Playlist. If more than one parameter matches,\n // any of the associated values MAY be used.\n this.trigger('error', {\n message: `EXT-X-DEFINE: No query param ${entry.attributes.QUERYPARAM}`\n });\n return;\n }\n\n addDef(entry.attributes.QUERYPARAM, decodeURIComponent(val));\n return;\n }\n\n if ('NAME' in entry.attributes) {\n if ('IMPORT' in entry.attributes) {\n // An EXT-X-DEFINE tag MUST contain either a NAME, an IMPORT, or a\n // QUERYPARAM attribute, but only one of the three. 
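Editor's note: the NAME/IMPORT/QUERYPARAM rules spelled out in these comments are easiest to check with a QUERYPARAM round trip. A sketch, where the URL and variable name are hypothetical:

```js
import { Parser } from 'm3u8-parser';

// opts.uri supplies the query params consulted by EXT-X-DEFINE:QUERYPARAM
const parser = new Parser({ uri: 'https://example.com/media.m3u8?token=abc' });
parser.push([
  '#EXTM3U',
  '#EXT-X-DEFINE:QUERYPARAM="token"',
  '#EXTINF:4,',
  'segment-{$token}.ts'
].join('\n'));
parser.end();

// {$token} was substituted from the query string before the uri handler ran
parser.manifest.segments[0].uri; // 'segment-abc.ts'
```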
Otherwise, the\n // client MUST fail to parse the Playlist.\n this.trigger('error', {\n message: 'EXT-X-DEFINE: Invalid attributes'\n });\n return;\n }\n\n if (!('VALUE' in entry.attributes) || typeof entry.attributes.VALUE !== 'string') {\n // This attribute is REQUIRED if the EXT-X-DEFINE tag has a NAME attribute.\n // The quoted-string MAY be empty.\n this.trigger('error', {\n message: `EXT-X-DEFINE: No value for ${entry.attributes.NAME}`\n });\n return;\n }\n\n addDef(entry.attributes.NAME, entry.attributes.VALUE);\n return;\n }\n\n if ('IMPORT' in entry.attributes) {\n if (!this.mainDefinitions[entry.attributes.IMPORT]) {\n // Covers two conditions, as mainDefinitions will always be empty on main\n //\n // EXT-X-DEFINE tags containing the IMPORT attribute MUST NOT occur in\n // Multivariant Playlists; they are only allowed in Media Playlists.\n //\n // If the IMPORT attribute value does not match any Variable Name in the\n // Multivariant Playlist, or if the Media Playlist loaded from a\n // Multivariant Playlist, the parser MUST fail the Playlist.\n this.trigger('error', {\n message: `EXT-X-DEFINE: No value ${entry.attributes.IMPORT} to import, or IMPORT used on main playlist`\n });\n return;\n }\n\n addDef(entry.attributes.IMPORT, this.mainDefinitions[entry.attributes.IMPORT]);\n return;\n } // An EXT-X-DEFINE tag MUST contain either a NAME, an IMPORT, or a QUERYPARAM\n // attribute, but only one of the three. Otherwise, the client MUST fail to\n // parse the Playlist.\n\n\n this.trigger('error', {\n message: 'EXT-X-DEFINE: No attribute'\n });\n },\n\n 'i-frame-playlist'() {\n this.manifest.iFramePlaylists.push({\n attributes: entry.attributes,\n uri: entry.uri,\n timeline: currentTimeline\n });\n this.warnOnMissingAttributes_('#EXT-X-I-FRAME-STREAM-INF', entry.attributes, ['BANDWIDTH', 'URI']);\n }\n\n })[entry.tagType] || noop).call(self);\n },\n\n uri() {\n currentUri.uri = entry.uri;\n uris.push(currentUri); // if no explicit duration was declared, use the target duration\n\n if (this.manifest.targetDuration && !('duration' in currentUri)) {\n this.trigger('warn', {\n message: 'defaulting segment duration to the target duration'\n });\n currentUri.duration = this.manifest.targetDuration;\n } // annotate with encryption information, if necessary\n\n\n if (key) {\n currentUri.key = key;\n }\n\n currentUri.timeline = currentTimeline; // annotate with initialization segment information, if necessary\n\n if (currentMap) {\n currentUri.map = currentMap;\n } // reset the last byterange end as it needs to be 0 between parts\n\n\n lastPartByterangeEnd = 0; // Once we have at least one program date time we can always extrapolate it forward\n\n if (this.lastProgramDateTime !== null) {\n currentUri.programDateTime = this.lastProgramDateTime;\n this.lastProgramDateTime += currentUri.duration * 1000;\n } // prepare for the next URI\n\n\n currentUri = {};\n },\n\n comment() {// comments are not important for playback\n },\n\n custom() {\n // if this is segment-level data attach the output to the segment\n if (entry.segment) {\n currentUri.custom = currentUri.custom || {};\n currentUri.custom[entry.customType] = entry.data; // if this is manifest-level data attach to the top level manifest object\n } else {\n this.manifest.custom = this.manifest.custom || {};\n this.manifest.custom[entry.customType] = entry.data;\n }\n }\n\n })[entry.type].call(self);\n });\n }\n\n requiredCompatibilityversion(currentVersion, targetVersion) {\n if (currentVersion < targetVersion || !currentVersion) {\n 
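// note: a missing EXT-X-VERSION, or one below the feature's minimum, only raises a 'warn' event here; parsing continues rather than aborting.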
this.trigger('warn', {\n message: `manifest must be at least version ${targetVersion}`\n });\n }\n }\n\n warnOnMissingAttributes_(identifier, attributes, required) {\n const missing = [];\n required.forEach(function (key) {\n if (!attributes.hasOwnProperty(key)) {\n missing.push(key);\n }\n });\n\n if (missing.length) {\n this.trigger('warn', {\n message: `${identifier} lacks required attribute(s): ${missing.join(', ')}`\n });\n }\n }\n /**\n * Parse the input string and update the manifest object.\n *\n * @param {string} chunk a potentially incomplete portion of the manifest\n */\n\n\n push(chunk) {\n this.lineStream.push(chunk);\n }\n /**\n * Flush any remaining input. This can be handy if the last line of an M3U8\n * manifest did not contain a trailing newline but the file has been\n * completely received.\n */\n\n\n end() {\n // flush any buffered input\n this.lineStream.push('\\n');\n\n if (this.manifest.dateRanges.length && this.lastProgramDateTime === null) {\n this.trigger('warn', {\n message: 'A playlist with EXT-X-DATERANGE tag must contain atleast one EXT-X-PROGRAM-DATE-TIME tag'\n });\n }\n\n this.lastProgramDateTime = null;\n this.trigger('end');\n }\n /**\n * Add an additional parser for non-standard tags\n *\n * @param {Object} options a map of options for the added parser\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {string} options.customType the custom type to register to the output\n * @param {Function} [options.dataParser] function to parse the line into an object\n * @param {boolean} [options.segment] should tag data be attached to the segment object\n */\n\n\n addParser(options) {\n this.parseStream.addParser(options);\n }\n /**\n * Add a custom header mapper\n *\n * @param {Object} options\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {Function} options.map function to translate tag into a different tag\n */\n\n\n addTagMapper(options) {\n this.parseStream.addTagMapper(options);\n }\n\n}\n\nexport { LineStream, ParseStream, Parser };\n", "import window from 'global/window';\nvar regexs = {\n // to determine mime types\n mp4: /^(av0?1|avc0?[1234]|vp0?9|flac|opus|mp3|mp4a|mp4v|stpp.ttml.im1t)/,\n webm: /^(vp0?[89]|av0?1|opus|vorbis)/,\n ogg: /^(vp0?[89]|theora|flac|opus|vorbis)/,\n // to determine if a codec is audio or video\n video: /^(av0?1|avc0?[1234]|vp0?[89]|hvc1|hev1|theora|mp4v)/,\n audio: /^(mp4a|flac|vorbis|opus|ac-[34]|ec-3|alac|mp3|speex|aac)/,\n text: /^(stpp.ttml.im1t)/,\n // mux.js support regex\n muxerVideo: /^(avc0?1)/,\n muxerAudio: /^(mp4a)/,\n // match nothing as muxer does not support text right now.\n // there cannot never be a character before the start of a string\n // so this matches nothing.\n muxerText: /a^/\n};\nvar mediaTypes = ['video', 'audio', 'text'];\nvar upperMediaTypes = ['Video', 'Audio', 'Text'];\n/**\n * Replace the old apple-style `avc1.
<dd>.<dd>` codec string with the standard\n * `avc1.<hhhhhh>`\n *\n * @param {string} codec\n * Codec string to translate\n * @return {string}\n * The translated codec string\n */\n\nexport var translateLegacyCodec = function translateLegacyCodec(codec) {\n if (!codec) {\n return codec;\n }\n\n return codec.replace(/avc1\\.(\\d+)\\.(\\d+)/i, function (orig, profile, avcLevel) {\n var profileHex = ('00' + Number(profile).toString(16)).slice(-2);\n var avcLevelHex = ('00' + Number(avcLevel).toString(16)).slice(-2);\n return 'avc1.' + profileHex + '00' + avcLevelHex;\n });\n};\n/**\n * Replace the old apple-style `avc1.
<dd>.<dd>` codec strings with the standard\n * `avc1.<hhhhhh>`\n *\n * @param {string[]} codecs\n * An array of codec strings to translate\n * @return {string[]}\n * The translated array of codec strings\n */\n\nexport var translateLegacyCodecs = function translateLegacyCodecs(codecs) {\n return codecs.map(translateLegacyCodec);\n};\n/**\n * Replace codecs in the codec string with the old apple-style `avc1.
<dd>.<dd>
` to the\n * standard `avc1.`.\n *\n * @param {string} codecString\n * The codec string\n * @return {string}\n * The codec string with old apple-style codecs replaced\n *\n * @private\n */\n\nexport var mapLegacyAvcCodecs = function mapLegacyAvcCodecs(codecString) {\n return codecString.replace(/avc1\\.(\\d+)\\.(\\d+)/i, function (match) {\n return translateLegacyCodecs([match])[0];\n });\n};\n/**\n * @typedef {Object} ParsedCodecInfo\n * @property {number} codecCount\n * Number of codecs parsed\n * @property {string} [videoCodec]\n * Parsed video codec (if found)\n * @property {string} [videoObjectTypeIndicator]\n * Video object type indicator (if found)\n * @property {string|null} audioProfile\n * Audio profile\n */\n\n/**\n * Parses a codec string to retrieve the number of codecs specified, the video codec and\n * object type indicator, and the audio profile.\n *\n * @param {string} [codecString]\n * The codec string to parse\n * @return {ParsedCodecInfo}\n * Parsed codec info\n */\n\nexport var parseCodecs = function parseCodecs(codecString) {\n if (codecString === void 0) {\n codecString = '';\n }\n\n var codecs = codecString.split(',');\n var result = [];\n codecs.forEach(function (codec) {\n codec = codec.trim();\n var codecType;\n mediaTypes.forEach(function (name) {\n var match = regexs[name].exec(codec.toLowerCase());\n\n if (!match || match.length <= 1) {\n return;\n }\n\n codecType = name; // maintain codec case\n\n var type = codec.substring(0, match[1].length);\n var details = codec.replace(type, '');\n result.push({\n type: type,\n details: details,\n mediaType: name\n });\n });\n\n if (!codecType) {\n result.push({\n type: codec,\n details: '',\n mediaType: 'unknown'\n });\n }\n });\n return result;\n};\n/**\n * Returns a ParsedCodecInfo object for the default alternate audio playlist if there is\n * a default alternate audio playlist for the provided audio group.\n *\n * @param {Object} master\n * The master playlist\n * @param {string} audioGroupId\n * ID of the audio group for which to find the default codec info\n * @return {ParsedCodecInfo}\n * Parsed codec info\n */\n\nexport var codecsFromDefault = function codecsFromDefault(master, audioGroupId) {\n if (!master.mediaGroups.AUDIO || !audioGroupId) {\n return null;\n }\n\n var audioGroup = master.mediaGroups.AUDIO[audioGroupId];\n\n if (!audioGroup) {\n return null;\n }\n\n for (var name in audioGroup) {\n var audioType = audioGroup[name];\n\n if (audioType.default && audioType.playlists) {\n // codec should be the same for all playlists within the audio type\n return parseCodecs(audioType.playlists[0].attributes.CODECS);\n }\n }\n\n return null;\n};\nexport var isVideoCodec = function isVideoCodec(codec) {\n if (codec === void 0) {\n codec = '';\n }\n\n return regexs.video.test(codec.trim().toLowerCase());\n};\nexport var isAudioCodec = function isAudioCodec(codec) {\n if (codec === void 0) {\n codec = '';\n }\n\n return regexs.audio.test(codec.trim().toLowerCase());\n};\nexport var isTextCodec = function isTextCodec(codec) {\n if (codec === void 0) {\n codec = '';\n }\n\n return regexs.text.test(codec.trim().toLowerCase());\n};\nexport var getMimeForCodec = function getMimeForCodec(codecString) {\n if (!codecString || typeof codecString !== 'string') {\n return;\n }\n\n var codecs = codecString.toLowerCase().split(',').map(function (c) {\n return translateLegacyCodec(c.trim());\n }); // default to video type\n\n var type = 'video'; // only change to audio type if the only codec we have is\n // audio\n\n if 
(codecs.length === 1 && isAudioCodec(codecs[0])) {\n type = 'audio';\n } else if (codecs.length === 1 && isTextCodec(codecs[0])) {\n // text uses application/ for now\n type = 'application';\n } // default the container to mp4\n\n\n var container = 'mp4'; // every codec must be able to go into the container\n // for that container to be the correct one\n\n if (codecs.every(function (c) {\n return regexs.mp4.test(c);\n })) {\n container = 'mp4';\n } else if (codecs.every(function (c) {\n return regexs.webm.test(c);\n })) {\n container = 'webm';\n } else if (codecs.every(function (c) {\n return regexs.ogg.test(c);\n })) {\n container = 'ogg';\n }\n\n return type + \"/\" + container + \";codecs=\\\"\" + codecString + \"\\\"\";\n};\nexport var browserSupportsCodec = function browserSupportsCodec(codecString) {\n if (codecString === void 0) {\n codecString = '';\n }\n\n return window.MediaSource && window.MediaSource.isTypeSupported && window.MediaSource.isTypeSupported(getMimeForCodec(codecString)) || false;\n};\nexport var muxerSupportsCodec = function muxerSupportsCodec(codecString) {\n if (codecString === void 0) {\n codecString = '';\n }\n\n return codecString.toLowerCase().split(',').every(function (codec) {\n codec = codec.trim(); // any match is supported.\n\n for (var i = 0; i < upperMediaTypes.length; i++) {\n var type = upperMediaTypes[i];\n\n if (regexs[\"muxer\" + type].test(codec)) {\n return true;\n }\n }\n\n return false;\n });\n};\nexport var DEFAULT_AUDIO_CODEC = 'mp4a.40.2';\nexport var DEFAULT_VIDEO_CODEC = 'avc1.4d400d';", "var MPEGURL_REGEX = /^(audio|video|application)\\/(x-|vnd\\.apple\\.)?mpegurl/i;\nvar DASH_REGEX = /^application\\/dash\\+xml/i;\n/**\n * Returns a string that describes the type of source based on a video source object's\n * media type.\n *\n * @see {@link https://dev.w3.org/html5/pf-summary/video.html#dom-source-type|Source Type}\n *\n * @param {string} type\n * Video source object media type\n * @return {('hls'|'dash'|'vhs-json'|null)}\n * VHS source type string\n */\n\nexport var simpleTypeFromSourceType = function simpleTypeFromSourceType(type) {\n if (MPEGURL_REGEX.test(type)) {\n return 'hls';\n }\n\n if (DASH_REGEX.test(type)) {\n return 'dash';\n } // Denotes the special case of a manifest object passed to http-streaming instead of a\n // source URL.\n //\n // See https://en.wikipedia.org/wiki/Media_type for details on specifying media types.\n //\n // In this case, vnd stands for vendor, video.js for the organization, VHS for this\n // project, and the +json suffix identifies the structure of the media type.\n\n\n if (type === 'application/vnd.videojs.vhs+json') {\n return 'vhs-json';\n }\n\n return null;\n};", "import window from 'global/window'; // const log2 = Math.log2 ? 
Math.log2 : (x) => (Math.log(x) / Math.log(2));\n\nvar repeat = function repeat(str, len) {\n var acc = '';\n\n while (len--) {\n acc += str;\n }\n\n return acc;\n}; // count the number of bits it would take to represent a number\n// we used to do this with log2 but BigInt does not support builtin math\n// Math.ceil(log2(x));\n\n\nexport var countBits = function countBits(x) {\n return x.toString(2).length;\n}; // count the number of whole bytes it would take to represent a number\n\nexport var countBytes = function countBytes(x) {\n return Math.ceil(countBits(x) / 8);\n};\nexport var padStart = function padStart(b, len, str) {\n if (str === void 0) {\n str = ' ';\n }\n\n return (repeat(str, len) + b.toString()).slice(-len);\n};\nexport var isArrayBufferView = function isArrayBufferView(obj) {\n if (typeof ArrayBuffer.isView === 'function') {\n return ArrayBuffer.isView(obj);\n }\n\n return obj && obj.buffer instanceof ArrayBuffer;\n};\nexport var isTypedArray = function isTypedArray(obj) {\n return isArrayBufferView(obj);\n};\nexport var toUint8 = function toUint8(bytes) {\n if (bytes instanceof Uint8Array) {\n return bytes;\n }\n\n if (!Array.isArray(bytes) && !isTypedArray(bytes) && !(bytes instanceof ArrayBuffer)) {\n // any non-number or NaN leads to empty uint8array\n // eslint-disable-next-line\n if (typeof bytes !== 'number' || typeof bytes === 'number' && bytes !== bytes) {\n bytes = 0;\n } else {\n bytes = [bytes];\n }\n }\n\n return new Uint8Array(bytes && bytes.buffer || bytes, bytes && bytes.byteOffset || 0, bytes && bytes.byteLength || 0);\n};\nexport var toHexString = function toHexString(bytes) {\n bytes = toUint8(bytes);\n var str = '';\n\n for (var i = 0; i < bytes.length; i++) {\n str += padStart(bytes[i].toString(16), 2, '0');\n }\n\n return str;\n};\nexport var toBinaryString = function toBinaryString(bytes) {\n bytes = toUint8(bytes);\n var str = '';\n\n for (var i = 0; i < bytes.length; i++) {\n str += padStart(bytes[i].toString(2), 8, '0');\n }\n\n return str;\n};\nvar BigInt = window.BigInt || Number;\nvar BYTE_TABLE = [BigInt('0x1'), BigInt('0x100'), BigInt('0x10000'), BigInt('0x1000000'), BigInt('0x100000000'), BigInt('0x10000000000'), BigInt('0x1000000000000'), BigInt('0x100000000000000'), BigInt('0x10000000000000000')];\nexport var ENDIANNESS = function () {\n var a = new Uint16Array([0xFFCC]);\n var b = new Uint8Array(a.buffer, a.byteOffset, a.byteLength);\n\n if (b[0] === 0xFF) {\n return 'big';\n }\n\n if (b[0] === 0xCC) {\n return 'little';\n }\n\n return 'unknown';\n}();\nexport var IS_BIG_ENDIAN = ENDIANNESS === 'big';\nexport var IS_LITTLE_ENDIAN = ENDIANNESS === 'little';\nexport var bytesToNumber = function bytesToNumber(bytes, _temp) {\n var _ref = _temp === void 0 ? {} : _temp,\n _ref$signed = _ref.signed,\n signed = _ref$signed === void 0 ? false : _ref$signed,\n _ref$le = _ref.le,\n le = _ref$le === void 0 ? false : _ref$le;\n\n bytes = toUint8(bytes);\n var fn = le ? 'reduce' : 'reduceRight';\n var obj = bytes[fn] ? bytes[fn] : Array.prototype[fn];\n var number = obj.call(bytes, function (total, byte, i) {\n var exponent = le ? 
i : Math.abs(i + 1 - bytes.length);\n return total + BigInt(byte) * BYTE_TABLE[exponent];\n }, BigInt(0));\n\n if (signed) {\n var max = BYTE_TABLE[bytes.length] / BigInt(2) - BigInt(1);\n number = BigInt(number);\n\n if (number > max) {\n number -= max;\n number -= max;\n number -= BigInt(2);\n }\n }\n\n return Number(number);\n};\nexport var numberToBytes = function numberToBytes(number, _temp2) {\n var _ref2 = _temp2 === void 0 ? {} : _temp2,\n _ref2$le = _ref2.le,\n le = _ref2$le === void 0 ? false : _ref2$le;\n\n // eslint-disable-next-line\n if (typeof number !== 'bigint' && typeof number !== 'number' || typeof number === 'number' && number !== number) {\n number = 0;\n }\n\n number = BigInt(number);\n var byteCount = countBytes(number);\n var bytes = new Uint8Array(new ArrayBuffer(byteCount));\n\n for (var i = 0; i < byteCount; i++) {\n var byteIndex = le ? i : Math.abs(i + 1 - bytes.length);\n bytes[byteIndex] = Number(number / BYTE_TABLE[i] & BigInt(0xFF));\n\n if (number < 0) {\n bytes[byteIndex] = Math.abs(~bytes[byteIndex]);\n bytes[byteIndex] -= i === 0 ? 1 : 2;\n }\n }\n\n return bytes;\n};\nexport var bytesToString = function bytesToString(bytes) {\n if (!bytes) {\n return '';\n } // TODO: should toUint8 handle cases where we only have 8 bytes\n // but report more since this is a Uint16+ Array?\n\n\n bytes = Array.prototype.slice.call(bytes);\n var string = String.fromCharCode.apply(null, toUint8(bytes));\n\n try {\n return decodeURIComponent(escape(string));\n } catch (e) {// if decodeURIComponent/escape fails, we are dealing with partial\n // or full non string data. Just return the potentially garbled string.\n }\n\n return string;\n};\nexport var stringToBytes = function stringToBytes(string, stringIsBytes) {\n if (typeof string !== 'string' && string && typeof string.toString === 'function') {\n string = string.toString();\n }\n\n if (typeof string !== 'string') {\n return new Uint8Array();\n } // If the string already is bytes, we don't have to do this\n // otherwise we do this so that we split multi length characters\n // into individual bytes\n\n\n if (!stringIsBytes) {\n string = unescape(encodeURIComponent(string));\n }\n\n var view = new Uint8Array(string.length);\n\n for (var i = 0; i < string.length; i++) {\n view[i] = string.charCodeAt(i);\n }\n\n return view;\n};\nexport var concatTypedArrays = function concatTypedArrays() {\n for (var _len = arguments.length, buffers = new Array(_len), _key = 0; _key < _len; _key++) {\n buffers[_key] = arguments[_key];\n }\n\n buffers = buffers.filter(function (b) {\n return b && (b.byteLength || b.length) && typeof b !== 'string';\n });\n\n if (buffers.length <= 1) {\n // for 0 length we will return empty uint8\n // for 1 length we return the first uint8\n return toUint8(buffers[0]);\n }\n\n var totalLen = buffers.reduce(function (total, buf, i) {\n return total + (buf.byteLength || buf.length);\n }, 0);\n var tempBuffer = new Uint8Array(totalLen);\n var offset = 0;\n buffers.forEach(function (buf) {\n buf = toUint8(buf);\n tempBuffer.set(buf, offset);\n offset += buf.byteLength;\n });\n return tempBuffer;\n};\n/**\n * Check if the bytes \"b\" are contained within bytes \"a\".\n *\n * @param {Uint8Array|Array} a\n * Bytes to check in\n *\n * @param {Uint8Array|Array} b\n * Bytes to check for\n *\n * @param {Object} options\n * options\n *\n * @param {Array|Uint8Array} [offset=0]\n * offset to use when looking at bytes in a\n *\n * @param {Array|Uint8Array} [mask=[]]\n * mask to use on bytes before comparison.\n *\n * @return 
{boolean}\n * If all bytes in b are inside of a, taking into account\n * bit masks.\n */\n\nexport var bytesMatch = function bytesMatch(a, b, _temp3) {\n var _ref3 = _temp3 === void 0 ? {} : _temp3,\n _ref3$offset = _ref3.offset,\n offset = _ref3$offset === void 0 ? 0 : _ref3$offset,\n _ref3$mask = _ref3.mask,\n mask = _ref3$mask === void 0 ? [] : _ref3$mask;\n\n a = toUint8(a);\n b = toUint8(b); // ie 11 does not support uint8 every\n\n var fn = b.every ? b.every : Array.prototype.every;\n return b.length && a.length - offset >= b.length && // ie 11 doesn't support every on uin8\n fn.call(b, function (bByte, i) {\n var aByte = mask[i] ? mask[i] & a[offset + i] : a[offset + i];\n return bByte === aByte;\n });\n};\nexport var sliceBytes = function sliceBytes(src, start, end) {\n if (Uint8Array.prototype.slice) {\n return Uint8Array.prototype.slice.call(src, start, end);\n }\n\n return new Uint8Array(Array.prototype.slice.call(src, start, end));\n};\nexport var reverseBytes = function reverseBytes(src) {\n if (src.reverse) {\n return src.reverse();\n }\n\n return Array.prototype.reverse.call(src);\n};", "/*! @name mpd-parser @version 1.3.0 @license Apache-2.0 */\nimport resolveUrl from '@videojs/vhs-utils/es/resolve-url';\nimport window from 'global/window';\nimport { forEachMediaGroup } from '@videojs/vhs-utils/es/media-groups';\nimport decodeB64ToUint8Array from '@videojs/vhs-utils/es/decode-b64-to-uint8-array';\nimport { DOMParser } from '@xmldom/xmldom';\n\nvar version = \"1.3.0\";\n\nconst isObject = obj => {\n return !!obj && typeof obj === 'object';\n};\n\nconst merge = (...objects) => {\n return objects.reduce((result, source) => {\n if (typeof source !== 'object') {\n return result;\n }\n\n Object.keys(source).forEach(key => {\n if (Array.isArray(result[key]) && Array.isArray(source[key])) {\n result[key] = result[key].concat(source[key]);\n } else if (isObject(result[key]) && isObject(source[key])) {\n result[key] = merge(result[key], source[key]);\n } else {\n result[key] = source[key];\n }\n });\n return result;\n }, {});\n};\nconst values = o => Object.keys(o).map(k => o[k]);\n\nconst range = (start, end) => {\n const result = [];\n\n for (let i = start; i < end; i++) {\n result.push(i);\n }\n\n return result;\n};\nconst flatten = lists => lists.reduce((x, y) => x.concat(y), []);\nconst from = list => {\n if (!list.length) {\n return [];\n }\n\n const result = [];\n\n for (let i = 0; i < list.length; i++) {\n result.push(list[i]);\n }\n\n return result;\n};\nconst findIndexes = (l, key) => l.reduce((a, e, i) => {\n if (e[key]) {\n a.push(i);\n }\n\n return a;\n}, []);\n/**\n * Returns a union of the included lists provided each element can be identified by a key.\n *\n * @param {Array} list - list of lists to get the union of\n * @param {Function} keyFunction - the function to use as a key for each element\n *\n * @return {Array} the union of the arrays\n */\n\nconst union = (lists, keyFunction) => {\n return values(lists.reduce((acc, list) => {\n list.forEach(el => {\n acc[keyFunction(el)] = el;\n });\n return acc;\n }, {}));\n};\n\nvar errors = {\n INVALID_NUMBER_OF_PERIOD: 'INVALID_NUMBER_OF_PERIOD',\n INVALID_NUMBER_OF_CONTENT_STEERING: 'INVALID_NUMBER_OF_CONTENT_STEERING',\n DASH_EMPTY_MANIFEST: 'DASH_EMPTY_MANIFEST',\n DASH_INVALID_XML: 'DASH_INVALID_XML',\n NO_BASE_URL: 'NO_BASE_URL',\n MISSING_SEGMENT_INFORMATION: 'MISSING_SEGMENT_INFORMATION',\n SEGMENT_TIME_UNSPECIFIED: 'SEGMENT_TIME_UNSPECIFIED',\n UNSUPPORTED_UTC_TIMING_SCHEME: 
'UNSUPPORTED_UTC_TIMING_SCHEME'\n};\n\n/**\n * @typedef {Object} SingleUri\n * @property {string} uri - relative location of segment\n * @property {string} resolvedUri - resolved location of segment\n * @property {Object} byterange - Object containing information on how to make byte range\n * requests following byte-range-spec per RFC2616.\n * @property {String} byterange.length - length of range request\n * @property {String} byterange.offset - byte offset of range request\n *\n * @see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1\n */\n\n/**\n * Converts a URLType node (5.3.9.2.3 Table 13) to a segment object\n * that conforms to how m3u8-parser is structured\n *\n * @see https://github.com/videojs/m3u8-parser\n *\n * @param {string} baseUrl - baseUrl provided by nodes\n * @param {string} source - source url for segment\n * @param {string} range - optional range used for range calls,\n * follows RFC 2616, Clause 14.35.1\n * @return {SingleUri} full segment information transformed into a format similar\n * to m3u8-parser\n */\n\nconst urlTypeToSegment = ({\n baseUrl = '',\n source = '',\n range = '',\n indexRange = ''\n}) => {\n const segment = {\n uri: source,\n resolvedUri: resolveUrl(baseUrl || '', source)\n };\n\n if (range || indexRange) {\n const rangeStr = range ? range : indexRange;\n const ranges = rangeStr.split('-'); // default to parsing this as a BigInt if possible\n\n let startRange = window.BigInt ? window.BigInt(ranges[0]) : parseInt(ranges[0], 10);\n let endRange = window.BigInt ? window.BigInt(ranges[1]) : parseInt(ranges[1], 10); // convert back to a number if less than MAX_SAFE_INTEGER\n\n if (startRange < Number.MAX_SAFE_INTEGER && typeof startRange === 'bigint') {\n startRange = Number(startRange);\n }\n\n if (endRange < Number.MAX_SAFE_INTEGER && typeof endRange === 'bigint') {\n endRange = Number(endRange);\n }\n\n let length;\n\n if (typeof endRange === 'bigint' || typeof startRange === 'bigint') {\n length = window.BigInt(endRange) - window.BigInt(startRange) + window.BigInt(1);\n } else {\n length = endRange - startRange + 1;\n }\n\n if (typeof length === 'bigint' && length < Number.MAX_SAFE_INTEGER) {\n length = Number(length);\n } // byterange should be inclusive according to\n // RFC 2616, Clause 14.35.1\n\n\n segment.byterange = {\n length,\n offset: startRange\n };\n }\n\n return segment;\n};\nconst byteRangeToString = byterange => {\n // `endRange` is one less than `offset + length` because the HTTP range\n // header uses inclusive ranges\n let endRange;\n\n if (typeof byterange.offset === 'bigint' || typeof byterange.length === 'bigint') {\n endRange = window.BigInt(byterange.offset) + window.BigInt(byterange.length) - window.BigInt(1);\n } else {\n endRange = byterange.offset + byterange.length - 1;\n }\n\n return `${byterange.offset}-${endRange}`;\n};\n\n/**\n * parse the end number attribue that can be a string\n * number, or undefined.\n *\n * @param {string|number|undefined} endNumber\n * The end number attribute.\n *\n * @return {number|null}\n * The result of parsing the end number.\n */\n\nconst parseEndNumber = endNumber => {\n if (endNumber && typeof endNumber !== 'number') {\n endNumber = parseInt(endNumber, 10);\n }\n\n if (isNaN(endNumber)) {\n return null;\n }\n\n return endNumber;\n};\n/**\n * Functions for calculating the range of available segments in static and dynamic\n * manifests.\n */\n\n\nconst segmentRange = {\n /**\n * Returns the entire range of available segments for a static MPD\n *\n * @param {Object} 
attributes\n * Inherited MPD attributes\n * @return {{ start: number, end: number }}\n * The start and end numbers for available segments\n */\n static(attributes) {\n const {\n duration,\n timescale = 1,\n sourceDuration,\n periodDuration\n } = attributes;\n const endNumber = parseEndNumber(attributes.endNumber);\n const segmentDuration = duration / timescale;\n\n if (typeof endNumber === 'number') {\n return {\n start: 0,\n end: endNumber\n };\n }\n\n if (typeof periodDuration === 'number') {\n return {\n start: 0,\n end: periodDuration / segmentDuration\n };\n }\n\n return {\n start: 0,\n end: sourceDuration / segmentDuration\n };\n },\n\n /**\n * Returns the current live window range of available segments for a dynamic MPD\n *\n * @param {Object} attributes\n * Inherited MPD attributes\n * @return {{ start: number, end: number }}\n * The start and end numbers for available segments\n */\n dynamic(attributes) {\n const {\n NOW,\n clientOffset,\n availabilityStartTime,\n timescale = 1,\n duration,\n periodStart = 0,\n minimumUpdatePeriod = 0,\n timeShiftBufferDepth = Infinity\n } = attributes;\n const endNumber = parseEndNumber(attributes.endNumber); // clientOffset is passed in at the top level of mpd-parser and is an offset calculated\n // after retrieving UTC server time.\n\n const now = (NOW + clientOffset) / 1000; // WC stands for Wall Clock.\n // Convert the period start time to EPOCH.\n\n const periodStartWC = availabilityStartTime + periodStart; // Period end in EPOCH is manifest's retrieval time + time until next update.\n\n const periodEndWC = now + minimumUpdatePeriod;\n const periodDuration = periodEndWC - periodStartWC;\n const segmentCount = Math.ceil(periodDuration * timescale / duration);\n const availableStart = Math.floor((now - periodStartWC - timeShiftBufferDepth) * timescale / duration);\n const availableEnd = Math.floor((now - periodStartWC) * timescale / duration);\n return {\n start: Math.max(0, availableStart),\n end: typeof endNumber === 'number' ? endNumber : Math.min(segmentCount, availableEnd)\n };\n }\n\n};\n/**\n * Maps a range of numbers to objects with information needed to build the corresponding\n * segment list\n *\n * @name toSegmentsCallback\n * @function\n * @param {number} number\n * Number of the segment\n * @param {number} index\n * Index of the number in the range list\n * @return {{ number: Number, duration: Number, timeline: Number, time: Number }}\n * Object with segment timing and duration info\n */\n\n/**\n * Returns a callback for Array.prototype.map for mapping a range of numbers to\n * information needed to build the segment list.\n *\n * @param {Object} attributes\n * Inherited MPD attributes\n * @return {toSegmentsCallback}\n * Callback map function\n */\n\nconst toSegments = attributes => number => {\n const {\n duration,\n timescale = 1,\n periodStart,\n startNumber = 1\n } = attributes;\n return {\n number: startNumber + number,\n duration: duration / timescale,\n timeline: periodStart,\n time: number * duration\n };\n};\n/**\n * Returns a list of objects containing segment timing and duration info used for\n * building the list of segments. 
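(Illustrative aside, not part of the upstream JSDoc: for a static manifest with\n * duration = 2, timescale = 1 and sourceDuration = 10, segmentRange.static above\n * returns { start: 0, end: 5 }, so range(0, 5) yields five two-second segments.) 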
This uses the @duration attribute specified\n * in the MPD manifest to derive the range of segments.\n *\n * @param {Object} attributes\n * Inherited MPD attributes\n * @return {{number: number, duration: number, time: number, timeline: number}[]}\n * List of Objects with segment timing and duration info\n */\n\nconst parseByDuration = attributes => {\n const {\n type,\n duration,\n timescale = 1,\n periodDuration,\n sourceDuration\n } = attributes;\n const {\n start,\n end\n } = segmentRange[type](attributes);\n const segments = range(start, end).map(toSegments(attributes));\n\n if (type === 'static') {\n const index = segments.length - 1; // section is either a period or the full source\n\n const sectionDuration = typeof periodDuration === 'number' ? periodDuration : sourceDuration; // final segment may be less than full segment duration\n\n segments[index].duration = sectionDuration - duration / timescale * index;\n }\n\n return segments;\n};\n\n/**\n * Translates SegmentBase into a set of segments.\n * SegmentBase (DASH SPEC Section 5.3.9.3.2) contains a set of child nodes. Each\n * node should be translated into segment.\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @return {Object[]} list of segments\n */\n\nconst segmentsFromBase = attributes => {\n const {\n baseUrl,\n initialization = {},\n sourceDuration,\n indexRange = '',\n periodStart,\n presentationTime,\n number = 0,\n duration\n } = attributes; // base url is required for SegmentBase to work, per spec (Section 5.3.9.2.1)\n\n if (!baseUrl) {\n throw new Error(errors.NO_BASE_URL);\n }\n\n const initSegment = urlTypeToSegment({\n baseUrl,\n source: initialization.sourceURL,\n range: initialization.range\n });\n const segment = urlTypeToSegment({\n baseUrl,\n source: baseUrl,\n indexRange\n });\n segment.map = initSegment; // If there is a duration, use it, otherwise use the given duration of the source\n // (since SegmentBase is only for one total segment)\n\n if (duration) {\n const segmentTimeInfo = parseByDuration(attributes);\n\n if (segmentTimeInfo.length) {\n segment.duration = segmentTimeInfo[0].duration;\n segment.timeline = segmentTimeInfo[0].timeline;\n }\n } else if (sourceDuration) {\n segment.duration = sourceDuration;\n segment.timeline = periodStart;\n } // If presentation time is provided, these segments are being generated by SIDX\n // references, and should use the time provided. For the general case of SegmentBase,\n // there should only be one segment in the period, so its presentation time is the same\n // as its period start.\n\n\n segment.presentationTime = presentationTime || periodStart;\n segment.number = number;\n return [segment];\n};\n/**\n * Given a playlist, a sidx box, and a baseUrl, update the segment list of the playlist\n * according to the sidx information given.\n *\n * playlist.sidx has metadata about the sidx whereas the sidx param\n * is the parsed sidx box itself.\n *\n * @param {Object} playlist the playlist to update the sidx information for\n * @param {Object} sidx the parsed sidx box\n * @return {Object} the playlist object with the updated sidx information\n */\n\nconst addSidxSegmentsToPlaylist$1 = (playlist, sidx, baseUrl) => {\n // Retain init segment information\n const initSegment = playlist.sidx.map ? 
playlist.sidx.map : null; // Retain source duration from initial main manifest parsing\n\n const sourceDuration = playlist.sidx.duration; // Retain source timeline\n\n const timeline = playlist.timeline || 0;\n const sidxByteRange = playlist.sidx.byterange;\n const sidxEnd = sidxByteRange.offset + sidxByteRange.length; // Retain timescale of the parsed sidx\n\n const timescale = sidx.timescale; // referenceType 1 refers to other sidx boxes\n\n const mediaReferences = sidx.references.filter(r => r.referenceType !== 1);\n const segments = [];\n const type = playlist.endList ? 'static' : 'dynamic';\n const periodStart = playlist.sidx.timeline;\n let presentationTime = periodStart;\n let number = playlist.mediaSequence || 0; // firstOffset is the offset from the end of the sidx box\n\n let startIndex; // eslint-disable-next-line\n\n if (typeof sidx.firstOffset === 'bigint') {\n startIndex = window.BigInt(sidxEnd) + sidx.firstOffset;\n } else {\n startIndex = sidxEnd + sidx.firstOffset;\n }\n\n for (let i = 0; i < mediaReferences.length; i++) {\n const reference = sidx.references[i]; // size of the referenced (sub)segment\n\n const size = reference.referencedSize; // duration of the referenced (sub)segment, in the timescale\n // this will be converted to seconds when generating segments\n\n const duration = reference.subsegmentDuration; // should be an inclusive range\n\n let endIndex; // eslint-disable-next-line\n\n if (typeof startIndex === 'bigint') {\n endIndex = startIndex + window.BigInt(size) - window.BigInt(1);\n } else {\n endIndex = startIndex + size - 1;\n }\n\n const indexRange = `${startIndex}-${endIndex}`;\n const attributes = {\n baseUrl,\n timescale,\n timeline,\n periodStart,\n presentationTime,\n number,\n duration,\n sourceDuration,\n indexRange,\n type\n };\n const segment = segmentsFromBase(attributes)[0];\n\n if (initSegment) {\n segment.map = initSegment;\n }\n\n segments.push(segment);\n\n if (typeof startIndex === 'bigint') {\n startIndex += window.BigInt(size);\n } else {\n startIndex += size;\n }\n\n presentationTime += duration / timescale;\n number++;\n }\n\n playlist.segments = segments;\n return playlist;\n};\n\nconst SUPPORTED_MEDIA_TYPES = ['AUDIO', 'SUBTITLES']; // allow one 60fps frame as leniency (arbitrarily chosen)\n\nconst TIME_FUDGE = 1 / 60;\n/**\n * Given a list of timelineStarts, combines, dedupes, and sorts them.\n *\n * @param {TimelineStart[]} timelineStarts - list of timeline starts\n *\n * @return {TimelineStart[]} the combined and deduped timeline starts\n */\n\nconst getUniqueTimelineStarts = timelineStarts => {\n return union(timelineStarts, ({\n timeline\n }) => timeline).sort((a, b) => a.timeline > b.timeline ? 
1 : -1);\n};\n/**\n * Finds the playlist with the matching NAME attribute.\n *\n * @param {Array} playlists - playlists to search through\n * @param {string} name - the NAME attribute to search for\n *\n * @return {Object|null} the matching playlist object, or null\n */\n\nconst findPlaylistWithName = (playlists, name) => {\n for (let i = 0; i < playlists.length; i++) {\n if (playlists[i].attributes.NAME === name) {\n return playlists[i];\n }\n }\n\n return null;\n};\n/**\n * Gets a flattened array of media group playlists.\n *\n * @param {Object} manifest - the main manifest object\n *\n * @return {Array} the media group playlists\n */\n\nconst getMediaGroupPlaylists = manifest => {\n let mediaGroupPlaylists = [];\n forEachMediaGroup(manifest, SUPPORTED_MEDIA_TYPES, (properties, type, group, label) => {\n mediaGroupPlaylists = mediaGroupPlaylists.concat(properties.playlists || []);\n });\n return mediaGroupPlaylists;\n};\n/**\n * Updates the playlist's media sequence numbers.\n *\n * @param {Object} config - options object\n * @param {Object} config.playlist - the playlist to update\n * @param {number} config.mediaSequence - the mediaSequence number to start with\n */\n\nconst updateMediaSequenceForPlaylist = ({\n playlist,\n mediaSequence\n}) => {\n playlist.mediaSequence = mediaSequence;\n playlist.segments.forEach((segment, index) => {\n segment.number = playlist.mediaSequence + index;\n });\n};\n/**\n * Updates the media and discontinuity sequence numbers of newPlaylists given oldPlaylists\n * and a complete list of timeline starts.\n *\n * If no matching playlist is found, only the discontinuity sequence number of the playlist\n * will be updated.\n *\n * Since early available timelines are not supported, at least one segment must be present.\n *\n * @param {Object} config - options object\n * @param {Object[]} oldPlaylists - the old playlists to use as a reference\n * @param {Object[]} newPlaylists - the new playlists to update\n * @param {Object} timelineStarts - all timelineStarts seen in the stream to this point\n */\n\nconst updateSequenceNumbers = ({\n oldPlaylists,\n newPlaylists,\n timelineStarts\n}) => {\n newPlaylists.forEach(playlist => {\n playlist.discontinuitySequence = timelineStarts.findIndex(function ({\n timeline\n }) {\n return timeline === playlist.timeline;\n }); // Playlists NAMEs come from DASH Representation IDs, which are mandatory\n // (see ISO_23009-1-2012 5.3.5.2).\n //\n // If the same Representation existed in a prior Period, it will retain the same NAME.\n\n const oldPlaylist = findPlaylistWithName(oldPlaylists, playlist.attributes.NAME);\n\n if (!oldPlaylist) {\n // Since this is a new playlist, the media sequence values can start from 0 without\n // consequence.\n return;\n } // TODO better support for live SIDX\n //\n // As of this writing, mpd-parser does not support multiperiod SIDX (in live or VOD).\n // This is evident by a playlist only having a single SIDX reference. In a multiperiod\n // playlist there would need to be multiple SIDX references. In addition, live SIDX is\n // not supported when the SIDX properties change on refreshes.\n //\n // In the future, if support needs to be added, the merging logic here can be called\n // after SIDX references are resolved. 
For now, exit early to prevent exceptions being\n // thrown due to undefined references.\n\n\n if (playlist.sidx) {\n return;\n } // Since we don't yet support early available timelines, we don't need to support\n // playlists with no segments.\n\n\n const firstNewSegment = playlist.segments[0];\n const oldMatchingSegmentIndex = oldPlaylist.segments.findIndex(function (oldSegment) {\n return Math.abs(oldSegment.presentationTime - firstNewSegment.presentationTime) < TIME_FUDGE;\n }); // No matching segment from the old playlist means the entire playlist was refreshed.\n // In this case the media sequence should account for this update, and the new segments\n // should be marked as discontinuous from the prior content, since the last prior\n // timeline was removed.\n\n if (oldMatchingSegmentIndex === -1) {\n updateMediaSequenceForPlaylist({\n playlist,\n mediaSequence: oldPlaylist.mediaSequence + oldPlaylist.segments.length\n });\n playlist.segments[0].discontinuity = true;\n playlist.discontinuityStarts.unshift(0); // No matching segment does not necessarily mean there's missing content.\n //\n // If the new playlist's timeline is the same as the last seen segment's timeline,\n // then a discontinuity can be added to identify that there's potentially missing\n // content. If there's no missing content, the discontinuity should still be rather\n // harmless. It's possible that if segment durations are accurate enough, that the\n // existence of a gap can be determined using the presentation times and durations,\n // but if the segment timing info is off, it may introduce more problems than simply\n // adding the discontinuity.\n //\n // If the new playlist's timeline is different from the last seen segment's timeline,\n // then a discontinuity can be added to identify that this is the first seen segment\n // of a new timeline. 
However, the logic at the start of this function that\n // determined the discontinuity sequence by timeline index is now off by one (the\n // discontinuity of the newest timeline hasn't yet fallen off the manifest...since\n // we added it), so the discontinuity sequence must be decremented.\n //\n // A period may also have a duration of zero, so the case of no segments is handled\n // here even though we don't yet support early available periods.\n\n if (!oldPlaylist.segments.length && playlist.timeline > oldPlaylist.timeline || oldPlaylist.segments.length && playlist.timeline > oldPlaylist.segments[oldPlaylist.segments.length - 1].timeline) {\n playlist.discontinuitySequence--;\n }\n\n return;\n } // If the first segment matched with a prior segment on a discontinuity (it's matching\n // on the first segment of a period), then the discontinuitySequence shouldn't be the\n // timeline's matching one, but instead should be the one prior, and the first segment\n // of the new manifest should be marked with a discontinuity.\n //\n // The reason for this special case is that discontinuity sequence shows how many\n // discontinuities have fallen off of the playlist, and discontinuities are marked on\n // the first segment of a new \"timeline.\" Because of this, while DASH will retain that\n // Period while the \"timeline\" exists, HLS keeps track of it via the discontinuity\n // sequence, and that first segment is an indicator, but can be removed before that\n // timeline is gone.\n\n\n const oldMatchingSegment = oldPlaylist.segments[oldMatchingSegmentIndex];\n\n if (oldMatchingSegment.discontinuity && !firstNewSegment.discontinuity) {\n firstNewSegment.discontinuity = true;\n playlist.discontinuityStarts.unshift(0);\n playlist.discontinuitySequence--;\n }\n\n updateMediaSequenceForPlaylist({\n playlist,\n mediaSequence: oldPlaylist.segments[oldMatchingSegmentIndex].number\n });\n });\n};\n/**\n * Given an old parsed manifest object and a new parsed manifest object, updates the\n * sequence and timing values within the new manifest to ensure that it lines up with the\n * old.\n *\n * @param {Array} oldManifest - the old main manifest object\n * @param {Array} newManifest - the new main manifest object\n *\n * @return {Object} the updated new manifest object\n */\n\nconst positionManifestOnTimeline = ({\n oldManifest,\n newManifest\n}) => {\n // Starting from v4.1.2 of the IOP, section 4.4.3.3 states:\n //\n // \"MPD@availabilityStartTime and Period@start shall not be changed over MPD updates.\"\n //\n // This was added from https://github.com/Dash-Industry-Forum/DASH-IF-IOP/issues/160\n //\n // Because of this change, and the difficulty of supporting periods with changing start\n // times, periods with changing start times are not supported. This makes the logic much\n // simpler, since periods with the same start time can be considered the same period\n // across refreshes.\n //\n // To give an example as to the difficulty of handling periods where the start time may\n // change, if a single period manifest is refreshed with another manifest with a single\n // period, and both the start and end times are increased, then the only way to determine\n // if it's a new period or an old one that has changed is to look through the segments of\n // each playlist and determine the presentation time bounds to find a match. 
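(Illustrative aside, not part of the upstream mpd-parser source: the segment matching\n // in updateSequenceNumbers above compares presentation times within TIME_FUDGE, one\n // 60fps frame, so an old segment at presentationTime 10.008 matches a new first\n // segment at 10.0 because |10.008 - 10.0| = 0.008 < 1/60.) 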
In addition,\n // if the period start changed to exceed the old period end, then there would be no\n // match, and it would not be possible to determine whether the refreshed period is a new\n // one or the old one.\n const oldPlaylists = oldManifest.playlists.concat(getMediaGroupPlaylists(oldManifest));\n const newPlaylists = newManifest.playlists.concat(getMediaGroupPlaylists(newManifest)); // Save all seen timelineStarts to the new manifest. Although this potentially means that\n // there's a \"memory leak\" in that it will never stop growing, in reality, only a couple\n // of properties are saved for each seen Period. Even long running live streams won't\n // generate too many Periods, unless the stream is watched for decades. In the future,\n // this can be optimized by mapping to discontinuity sequence numbers for each timeline,\n // but it may not become an issue, and the additional info can be useful for debugging.\n\n newManifest.timelineStarts = getUniqueTimelineStarts([oldManifest.timelineStarts, newManifest.timelineStarts]);\n updateSequenceNumbers({\n oldPlaylists,\n newPlaylists,\n timelineStarts: newManifest.timelineStarts\n });\n return newManifest;\n};\n\nconst generateSidxKey = sidx => sidx && sidx.uri + '-' + byteRangeToString(sidx.byterange);\n\nconst mergeDiscontiguousPlaylists = playlists => {\n // Break out playlists into groups based on their baseUrl\n const playlistsByBaseUrl = playlists.reduce(function (acc, cur) {\n if (!acc[cur.attributes.baseUrl]) {\n acc[cur.attributes.baseUrl] = [];\n }\n\n acc[cur.attributes.baseUrl].push(cur);\n return acc;\n }, {});\n let allPlaylists = [];\n Object.values(playlistsByBaseUrl).forEach(playlistGroup => {\n const mergedPlaylists = values(playlistGroup.reduce((acc, playlist) => {\n // assuming playlist IDs are the same across periods\n // TODO: handle multiperiod where representation sets are not the same\n // across periods\n const name = playlist.attributes.id + (playlist.attributes.lang || '');\n\n if (!acc[name]) {\n // First Period\n acc[name] = playlist;\n acc[name].attributes.timelineStarts = [];\n } else {\n // Subsequent Periods\n if (playlist.segments) {\n // first segment of subsequent periods signal a discontinuity\n if (playlist.segments[0]) {\n playlist.segments[0].discontinuity = true;\n }\n\n acc[name].segments.push(...playlist.segments);\n } // bubble up contentProtection, this assumes all DRM content\n // has the same contentProtection\n\n\n if (playlist.attributes.contentProtection) {\n acc[name].attributes.contentProtection = playlist.attributes.contentProtection;\n }\n }\n\n acc[name].attributes.timelineStarts.push({\n // Although they represent the same number, it's important to have both to make it\n // compatible with HLS potentially having a similar attribute.\n start: playlist.attributes.periodStart,\n timeline: playlist.attributes.periodStart\n });\n return acc;\n }, {}));\n allPlaylists = allPlaylists.concat(mergedPlaylists);\n });\n return allPlaylists.map(playlist => {\n playlist.discontinuityStarts = findIndexes(playlist.segments || [], 'discontinuity');\n return playlist;\n });\n};\n\nconst addSidxSegmentsToPlaylist = (playlist, sidxMapping) => {\n const sidxKey = generateSidxKey(playlist.sidx);\n const sidxMatch = sidxKey && sidxMapping[sidxKey] && sidxMapping[sidxKey].sidx;\n\n if (sidxMatch) {\n addSidxSegmentsToPlaylist$1(playlist, sidxMatch, playlist.sidx.resolvedUri);\n }\n\n return playlist;\n};\nconst addSidxSegmentsToPlaylists = (playlists, sidxMapping = {}) => {\n if 
(!Object.keys(sidxMapping).length) {\n return playlists;\n }\n\n for (const i in playlists) {\n playlists[i] = addSidxSegmentsToPlaylist(playlists[i], sidxMapping);\n }\n\n return playlists;\n};\nconst formatAudioPlaylist = ({\n attributes,\n segments,\n sidx,\n mediaSequence,\n discontinuitySequence,\n discontinuityStarts\n}, isAudioOnly) => {\n const playlist = {\n attributes: {\n NAME: attributes.id,\n BANDWIDTH: attributes.bandwidth,\n CODECS: attributes.codecs,\n ['PROGRAM-ID']: 1\n },\n uri: '',\n endList: attributes.type === 'static',\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n targetDuration: attributes.duration,\n discontinuitySequence,\n discontinuityStarts,\n timelineStarts: attributes.timelineStarts,\n mediaSequence,\n segments\n };\n\n if (attributes.contentProtection) {\n playlist.contentProtection = attributes.contentProtection;\n }\n\n if (attributes.serviceLocation) {\n playlist.attributes.serviceLocation = attributes.serviceLocation;\n }\n\n if (sidx) {\n playlist.sidx = sidx;\n }\n\n if (isAudioOnly) {\n playlist.attributes.AUDIO = 'audio';\n playlist.attributes.SUBTITLES = 'subs';\n }\n\n return playlist;\n};\nconst formatVttPlaylist = ({\n attributes,\n segments,\n mediaSequence,\n discontinuityStarts,\n discontinuitySequence\n}) => {\n if (typeof segments === 'undefined') {\n // vtt tracks may use single file in BaseURL\n segments = [{\n uri: attributes.baseUrl,\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n duration: attributes.sourceDuration,\n number: 0\n }]; // targetDuration should be the same duration as the only segment\n\n attributes.duration = attributes.sourceDuration;\n }\n\n const m3u8Attributes = {\n NAME: attributes.id,\n BANDWIDTH: attributes.bandwidth,\n ['PROGRAM-ID']: 1\n };\n\n if (attributes.codecs) {\n m3u8Attributes.CODECS = attributes.codecs;\n }\n\n const vttPlaylist = {\n attributes: m3u8Attributes,\n uri: '',\n endList: attributes.type === 'static',\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n targetDuration: attributes.duration,\n timelineStarts: attributes.timelineStarts,\n discontinuityStarts,\n discontinuitySequence,\n mediaSequence,\n segments\n };\n\n if (attributes.serviceLocation) {\n vttPlaylist.attributes.serviceLocation = attributes.serviceLocation;\n }\n\n return vttPlaylist;\n};\nconst organizeAudioPlaylists = (playlists, sidxMapping = {}, isAudioOnly = false) => {\n let mainPlaylist;\n const formattedPlaylists = playlists.reduce((a, playlist) => {\n const role = playlist.attributes.role && playlist.attributes.role.value || '';\n const language = playlist.attributes.lang || '';\n let label = playlist.attributes.label || 'main';\n\n if (language && !playlist.attributes.label) {\n const roleLabel = role ? 
` (${role})` : '';\n label = `${playlist.attributes.lang}${roleLabel}`;\n }\n\n if (!a[label]) {\n a[label] = {\n language,\n autoselect: true,\n default: role === 'main',\n playlists: [],\n uri: ''\n };\n }\n\n const formatted = addSidxSegmentsToPlaylist(formatAudioPlaylist(playlist, isAudioOnly), sidxMapping);\n a[label].playlists.push(formatted);\n\n if (typeof mainPlaylist === 'undefined' && role === 'main') {\n mainPlaylist = playlist;\n mainPlaylist.default = true;\n }\n\n return a;\n }, {}); // if no playlists have role \"main\", mark the first as main\n\n if (!mainPlaylist) {\n const firstLabel = Object.keys(formattedPlaylists)[0];\n formattedPlaylists[firstLabel].default = true;\n }\n\n return formattedPlaylists;\n};\nconst organizeVttPlaylists = (playlists, sidxMapping = {}) => {\n return playlists.reduce((a, playlist) => {\n const label = playlist.attributes.label || playlist.attributes.lang || 'text';\n\n if (!a[label]) {\n a[label] = {\n language: label,\n default: false,\n autoselect: false,\n playlists: [],\n uri: ''\n };\n }\n\n a[label].playlists.push(addSidxSegmentsToPlaylist(formatVttPlaylist(playlist), sidxMapping));\n return a;\n }, {});\n};\n\nconst organizeCaptionServices = captionServices => captionServices.reduce((svcObj, svc) => {\n if (!svc) {\n return svcObj;\n }\n\n svc.forEach(service => {\n const {\n channel,\n language\n } = service;\n svcObj[language] = {\n autoselect: false,\n default: false,\n instreamId: channel,\n language\n };\n\n if (service.hasOwnProperty('aspectRatio')) {\n svcObj[language].aspectRatio = service.aspectRatio;\n }\n\n if (service.hasOwnProperty('easyReader')) {\n svcObj[language].easyReader = service.easyReader;\n }\n\n if (service.hasOwnProperty('3D')) {\n svcObj[language]['3D'] = service['3D'];\n }\n });\n return svcObj;\n}, {});\n\nconst formatVideoPlaylist = ({\n attributes,\n segments,\n sidx,\n discontinuityStarts\n}) => {\n const playlist = {\n attributes: {\n NAME: attributes.id,\n AUDIO: 'audio',\n SUBTITLES: 'subs',\n RESOLUTION: {\n width: attributes.width,\n height: attributes.height\n },\n CODECS: attributes.codecs,\n BANDWIDTH: attributes.bandwidth,\n ['PROGRAM-ID']: 1\n },\n uri: '',\n endList: attributes.type === 'static',\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n targetDuration: attributes.duration,\n discontinuityStarts,\n timelineStarts: attributes.timelineStarts,\n segments\n };\n\n if (attributes.frameRate) {\n playlist.attributes['FRAME-RATE'] = attributes.frameRate;\n }\n\n if (attributes.contentProtection) {\n playlist.contentProtection = attributes.contentProtection;\n }\n\n if (attributes.serviceLocation) {\n playlist.attributes.serviceLocation = attributes.serviceLocation;\n }\n\n if (sidx) {\n playlist.sidx = sidx;\n }\n\n return playlist;\n};\n\nconst videoOnly = ({\n attributes\n}) => attributes.mimeType === 'video/mp4' || attributes.mimeType === 'video/webm' || attributes.contentType === 'video';\n\nconst audioOnly = ({\n attributes\n}) => attributes.mimeType === 'audio/mp4' || attributes.mimeType === 'audio/webm' || attributes.contentType === 'audio';\n\nconst vttOnly = ({\n attributes\n}) => attributes.mimeType === 'text/vtt' || attributes.contentType === 'text';\n/**\n * Contains start and timeline properties denoting a timeline start. 
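(Illustrative aside, not part of the\n * upstream comment: a Period starting at 30 seconds is recorded as\n * { start: 30, timeline: 30 }; see mergeDiscontiguousPlaylists above, which pushes\n * attributes.periodStart into both fields.) 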
For DASH, these will\n * be the same number.\n *\n * @typedef {Object} TimelineStart\n * @property {number} start - the start time of the timeline\n * @property {number} timeline - the timeline number\n */\n\n/**\n * Adds appropriate media and discontinuity sequence values to the segments and playlists.\n *\n * Throughout mpd-parser, the `number` attribute is used in relation to `startNumber`, a\n * DASH specific attribute used in constructing segment URI's from templates. However, from\n * an HLS perspective, the `number` attribute on a segment would be its `mediaSequence`\n * value, which should start at the original media sequence value (or 0) and increment by 1\n * for each segment thereafter. Since DASH's `startNumber` values are independent per\n * period, it doesn't make sense to use it for `number`. Instead, assume everything starts\n * from a 0 mediaSequence value and increment from there.\n *\n * Note that VHS currently doesn't use the `number` property, but it can be helpful for\n * debugging and making sense of the manifest.\n *\n * For live playlists, to account for values increasing in manifests when periods are\n * removed on refreshes, merging logic should be used to update the numbers to their\n * appropriate values (to ensure they're sequential and increasing).\n *\n * @param {Object[]} playlists - the playlists to update\n * @param {TimelineStart[]} timelineStarts - the timeline starts for the manifest\n */\n\n\nconst addMediaSequenceValues = (playlists, timelineStarts) => {\n // increment all segments sequentially\n playlists.forEach(playlist => {\n playlist.mediaSequence = 0;\n playlist.discontinuitySequence = timelineStarts.findIndex(function ({\n timeline\n }) {\n return timeline === playlist.timeline;\n });\n\n if (!playlist.segments) {\n return;\n }\n\n playlist.segments.forEach((segment, index) => {\n segment.number = index;\n });\n });\n};\n/**\n * Given a media group object, flattens all playlists within the media group into a single\n * array.\n *\n * @param {Object} mediaGroupObject - the media group object\n *\n * @return {Object[]}\n * The media group playlists\n */\n\nconst flattenMediaGroupPlaylists = mediaGroupObject => {\n if (!mediaGroupObject) {\n return [];\n }\n\n return Object.keys(mediaGroupObject).reduce((acc, label) => {\n const labelContents = mediaGroupObject[label];\n return acc.concat(labelContents.playlists);\n }, []);\n};\nconst toM3u8 = ({\n dashPlaylists,\n locations,\n contentSteering,\n sidxMapping = {},\n previousManifest,\n eventStream\n}) => {\n if (!dashPlaylists.length) {\n return {};\n } // grab all main manifest attributes\n\n\n const {\n sourceDuration: duration,\n type,\n suggestedPresentationDelay,\n minimumUpdatePeriod\n } = dashPlaylists[0].attributes;\n const videoPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(videoOnly)).map(formatVideoPlaylist);\n const audioPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(audioOnly));\n const vttPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(vttOnly));\n const captions = dashPlaylists.map(playlist => playlist.attributes.captionServices).filter(Boolean);\n const manifest = {\n allowCache: true,\n discontinuityStarts: [],\n segments: [],\n endList: true,\n mediaGroups: {\n AUDIO: {},\n VIDEO: {},\n ['CLOSED-CAPTIONS']: {},\n SUBTITLES: {}\n },\n uri: '',\n duration,\n playlists: addSidxSegmentsToPlaylists(videoPlaylists, sidxMapping)\n };\n\n if (minimumUpdatePeriod >= 0) {\n manifest.minimumUpdatePeriod = minimumUpdatePeriod * 1000;\n }\n\n if 
(locations) {\n manifest.locations = locations;\n }\n\n if (contentSteering) {\n manifest.contentSteering = contentSteering;\n }\n\n if (type === 'dynamic') {\n manifest.suggestedPresentationDelay = suggestedPresentationDelay;\n }\n\n if (eventStream && eventStream.length > 0) {\n manifest.eventStream = eventStream;\n }\n\n const isAudioOnly = manifest.playlists.length === 0;\n const organizedAudioGroup = audioPlaylists.length ? organizeAudioPlaylists(audioPlaylists, sidxMapping, isAudioOnly) : null;\n const organizedVttGroup = vttPlaylists.length ? organizeVttPlaylists(vttPlaylists, sidxMapping) : null;\n const formattedPlaylists = videoPlaylists.concat(flattenMediaGroupPlaylists(organizedAudioGroup), flattenMediaGroupPlaylists(organizedVttGroup));\n const playlistTimelineStarts = formattedPlaylists.map(({\n timelineStarts\n }) => timelineStarts);\n manifest.timelineStarts = getUniqueTimelineStarts(playlistTimelineStarts);\n addMediaSequenceValues(formattedPlaylists, manifest.timelineStarts);\n\n if (organizedAudioGroup) {\n manifest.mediaGroups.AUDIO.audio = organizedAudioGroup;\n }\n\n if (organizedVttGroup) {\n manifest.mediaGroups.SUBTITLES.subs = organizedVttGroup;\n }\n\n if (captions.length) {\n manifest.mediaGroups['CLOSED-CAPTIONS'].cc = organizeCaptionServices(captions);\n }\n\n if (previousManifest) {\n return positionManifestOnTimeline({\n oldManifest: previousManifest,\n newManifest: manifest\n });\n }\n\n return manifest;\n};\n\n/**\n * Calculates the R (repetition) value for a live stream (for the final segment\n * in a manifest where the r value is negative 1)\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {number} time\n * current time (typically the total time up until the final segment)\n * @param {number} duration\n * duration property for the given \n *\n * @return {number}\n * R value to reach the end of the given period\n */\nconst getLiveRValue = (attributes, time, duration) => {\n const {\n NOW,\n clientOffset,\n availabilityStartTime,\n timescale = 1,\n periodStart = 0,\n minimumUpdatePeriod = 0\n } = attributes;\n const now = (NOW + clientOffset) / 1000;\n const periodStartWC = availabilityStartTime + periodStart;\n const periodEndWC = now + minimumUpdatePeriod;\n const periodDuration = periodEndWC - periodStartWC;\n return Math.ceil((periodDuration * timescale - time) / duration);\n};\n/**\n * Uses information provided by SegmentTemplate.SegmentTimeline to determine segment\n * timing and duration\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]} segmentTimeline\n * List of objects representing the attributes of each S element contained within\n *\n * @return {{number: number, duration: number, time: number, timeline: number}[]}\n * List of Objects with segment timing and duration info\n */\n\n\nconst parseByTimeline = (attributes, segmentTimeline) => {\n const {\n type,\n minimumUpdatePeriod = 0,\n media = '',\n sourceDuration,\n timescale = 1,\n startNumber = 1,\n periodStart: timeline\n } = attributes;\n const segments = [];\n let time = -1;\n\n for (let sIndex = 0; sIndex < segmentTimeline.length; sIndex++) {\n const S = segmentTimeline[sIndex];\n const duration = S.d;\n const repeat = S.r || 0;\n const segmentTime = S.t || 0;\n\n if (time < 0) {\n // first segment\n time = segmentTime;\n }\n\n if (segmentTime && segmentTime > time) {\n // 
discontinuity\n // TODO: How to handle this type of discontinuity\n // timeline++ here would treat it like HLS discontinuity and content would\n // get appended without gap\n // E.G.\n // <S t=\"0\" d=\"1\" />\n // <S d=\"1\" />\n // <S d=\"1\" />\n // <S t=\"5\" d=\"1\" />\n // would have $Time$ values of [0, 1, 2, 5]\n // should this be appended at time positions [0, 1, 2, 3],(#EXT-X-DISCONTINUITY)\n // or [0, 1, 2, gap, gap, 5]? (#EXT-X-GAP)\n // does the value of sourceDuration consider this when calculating arbitrary\n // negative @r repeat value?\n // E.G. Same elements as above with this added at the end\n // <S d=\"1\" r=\"-1\" />\n // with a sourceDuration of 10\n // Would the 2 gaps be included in the time duration calculations resulting in\n // 8 segments with $Time$ values of [0, 1, 2, 5, 6, 7, 8, 9] or 10 segments\n // with $Time$ values of [0, 1, 2, 5, 6, 7, 8, 9, 10, 11] ?\n time = segmentTime;\n }\n\n let count;\n\n if (repeat < 0) {\n const nextS = sIndex + 1;\n\n if (nextS === segmentTimeline.length) {\n // last segment\n if (type === 'dynamic' && minimumUpdatePeriod > 0 && media.indexOf('$Number$') > 0) {\n count = getLiveRValue(attributes, time, duration);\n } else {\n // TODO: This may be incorrect depending on conclusion of TODO above\n count = (sourceDuration * timescale - time) / duration;\n }\n } else {\n count = (segmentTimeline[nextS].t - time) / duration;\n }\n } else {\n count = repeat + 1;\n }\n\n const end = startNumber + segments.length + count;\n let number = startNumber + segments.length;\n\n while (number < end) {\n segments.push({\n number,\n duration: duration / timescale,\n time,\n timeline\n });\n time += duration;\n number++;\n }\n }\n\n return segments;\n};\n\nconst identifierPattern = /\\$([A-z]*)(?:(%0)([0-9]+)d)?\\$/g;\n/**\n * Replaces template identifiers with corresponding values. To be used as the callback\n * for String.prototype.replace\n *\n * @name replaceCallback\n * @function\n * @param {string} match\n * Entire match of identifier\n * @param {string} identifier\n * Name of matched identifier\n * @param {string} format\n * Format tag string. Its presence indicates that padding is expected\n * @param {string} width\n * Desired length of the replaced value. 
Values less than this width shall be left\n * zero padded\n * @return {string}\n * Replacement for the matched identifier\n */\n\n/**\n * Returns a function to be used as a callback for String.prototype.replace to replace\n * template identifiers\n *\n * @param {Object} values\n * Object containing values that shall be used to replace known identifiers\n * @param {number} values.RepresentationID\n * Value of the Representation@id attribute\n * @param {number} values.Number\n * Number of the corresponding segment\n * @param {number} values.Bandwidth\n * Value of the Representation@bandwidth attribute.\n * @param {number} values.Time\n * Timestamp value of the corresponding segment\n * @return {replaceCallback}\n * Callback to be used with String.prototype.replace to replace identifiers\n */\n\nconst identifierReplacement = values => (match, identifier, format, width) => {\n if (match === '$$') {\n // escape sequence\n return '$';\n }\n\n if (typeof values[identifier] === 'undefined') {\n return match;\n }\n\n const value = '' + values[identifier];\n\n if (identifier === 'RepresentationID') {\n // Format tag shall not be present with RepresentationID\n return value;\n }\n\n if (!format) {\n width = 1;\n } else {\n width = parseInt(width, 10);\n }\n\n if (value.length >= width) {\n return value;\n }\n\n return `${new Array(width - value.length + 1).join('0')}${value}`;\n};\n/**\n * Constructs a segment url from a template string\n *\n * @param {string} url\n * Template string to construct url from\n * @param {Object} values\n * Object containing values that shall be used to replace known identifiers\n * @param {number} values.RepresentationID\n * Value of the Representation@id attribute\n * @param {number} values.Number\n * Number of the corresponding segment\n * @param {number} values.Bandwidth\n * Value of the Representation@bandwidth attribute.\n * @param {number} values.Time\n * Timestamp value of the corresponding segment\n * @return {string}\n * Segment url with identifiers replaced\n */\n\nconst constructTemplateUrl = (url, values) => url.replace(identifierPattern, identifierReplacement(values));\n/**\n * Generates a list of objects containing timing and duration information about each\n * segment needed to generate segment uris and the complete segment object\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]|undefined} segmentTimeline\n * List of objects representing the attributes of each S element contained within\n * the SegmentTimeline element\n * @return {{number: number, duration: number, time: number, timeline: number}[]}\n * List of Objects with segment timing and duration info\n */\n\nconst parseTemplateInfo = (attributes, segmentTimeline) => {\n if (!attributes.duration && !segmentTimeline) {\n // if neither @duration nor SegmentTimeline is present, then there shall be exactly\n // one media segment\n return [{\n number: attributes.startNumber || 1,\n duration: attributes.sourceDuration,\n time: 0,\n timeline: attributes.periodStart\n }];\n }\n\n if (attributes.duration) {\n return parseByDuration(attributes);\n }\n\n return parseByTimeline(attributes, segmentTimeline);\n};\n/**\n * Generates a list of segments using information provided by the SegmentTemplate element\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]|undefined} segmentTimeline\n * List of objects 
representing the attributes of each S element contained within\n * the SegmentTimeline element\n * @return {Object[]}\n * List of segment objects\n */\n\nconst segmentsFromTemplate = (attributes, segmentTimeline) => {\n const templateValues = {\n RepresentationID: attributes.id,\n Bandwidth: attributes.bandwidth || 0\n };\n const {\n initialization = {\n sourceURL: '',\n range: ''\n }\n } = attributes;\n const mapSegment = urlTypeToSegment({\n baseUrl: attributes.baseUrl,\n source: constructTemplateUrl(initialization.sourceURL, templateValues),\n range: initialization.range\n });\n const segments = parseTemplateInfo(attributes, segmentTimeline);\n return segments.map(segment => {\n templateValues.Number = segment.number;\n templateValues.Time = segment.time;\n const uri = constructTemplateUrl(attributes.media || '', templateValues); // See DASH spec section 5.3.9.2.2\n // - if timescale isn't present on any level, default to 1.\n\n const timescale = attributes.timescale || 1; // - if presentationTimeOffset isn't present on any level, default to 0\n\n const presentationTimeOffset = attributes.presentationTimeOffset || 0;\n const presentationTime = // Even if the @t attribute is not specified for the segment, segment.time is\n // calculated in mpd-parser prior to this, so it's assumed to be available.\n attributes.periodStart + (segment.time - presentationTimeOffset) / timescale;\n const map = {\n uri,\n timeline: segment.timeline,\n duration: segment.duration,\n resolvedUri: resolveUrl(attributes.baseUrl || '', uri),\n map: mapSegment,\n number: segment.number,\n presentationTime\n };\n return map;\n });\n};\n\n/**\n * Converts a <SegmentURL> (of type URLType from the DASH spec 5.3.9.2 Table 14)\n * to an object that matches the output of a segment in videojs/mpd-parser\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object} segmentUrl\n * <SegmentURL> node to translate into a segment object\n * @return {Object} translated segment object\n */\n\nconst SegmentURLToSegmentObject = (attributes, segmentUrl) => {\n const {\n baseUrl,\n initialization = {}\n } = attributes;\n const initSegment = urlTypeToSegment({\n baseUrl,\n source: initialization.sourceURL,\n range: initialization.range\n });\n const segment = urlTypeToSegment({\n baseUrl,\n source: segmentUrl.media,\n range: segmentUrl.mediaRange\n });\n segment.map = initSegment;\n return segment;\n};\n/**\n * Generates a list of segments using information provided by the SegmentList element\n * SegmentList (DASH SPEC Section 5.3.9.3.2) contains a set of <SegmentURL> nodes. 
Each\n * node should be translated into segment.\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]|undefined} segmentTimeline\n * List of objects representing the attributes of each S element contained within\n * the SegmentTimeline element\n * @return {Object.} list of segments\n */\n\n\nconst segmentsFromList = (attributes, segmentTimeline) => {\n const {\n duration,\n segmentUrls = [],\n periodStart\n } = attributes; // Per spec (5.3.9.2.1) no way to determine segment duration OR\n // if both SegmentTimeline and @duration are defined, it is outside of spec.\n\n if (!duration && !segmentTimeline || duration && segmentTimeline) {\n throw new Error(errors.SEGMENT_TIME_UNSPECIFIED);\n }\n\n const segmentUrlMap = segmentUrls.map(segmentUrlObject => SegmentURLToSegmentObject(attributes, segmentUrlObject));\n let segmentTimeInfo;\n\n if (duration) {\n segmentTimeInfo = parseByDuration(attributes);\n }\n\n if (segmentTimeline) {\n segmentTimeInfo = parseByTimeline(attributes, segmentTimeline);\n }\n\n const segments = segmentTimeInfo.map((segmentTime, index) => {\n if (segmentUrlMap[index]) {\n const segment = segmentUrlMap[index]; // See DASH spec section 5.3.9.2.2\n // - if timescale isn't present on any level, default to 1.\n\n const timescale = attributes.timescale || 1; // - if presentationTimeOffset isn't present on any level, default to 0\n\n const presentationTimeOffset = attributes.presentationTimeOffset || 0;\n segment.timeline = segmentTime.timeline;\n segment.duration = segmentTime.duration;\n segment.number = segmentTime.number;\n segment.presentationTime = periodStart + (segmentTime.time - presentationTimeOffset) / timescale;\n return segment;\n } // Since we're mapping we should get rid of any blank segments (in case\n // the given SegmentTimeline is handling for more elements than we have\n // SegmentURLs for).\n\n }).filter(segment => segment);\n return segments;\n};\n\nconst generateSegments = ({\n attributes,\n segmentInfo\n}) => {\n let segmentAttributes;\n let segmentsFn;\n\n if (segmentInfo.template) {\n segmentsFn = segmentsFromTemplate;\n segmentAttributes = merge(attributes, segmentInfo.template);\n } else if (segmentInfo.base) {\n segmentsFn = segmentsFromBase;\n segmentAttributes = merge(attributes, segmentInfo.base);\n } else if (segmentInfo.list) {\n segmentsFn = segmentsFromList;\n segmentAttributes = merge(attributes, segmentInfo.list);\n }\n\n const segmentsInfo = {\n attributes\n };\n\n if (!segmentsFn) {\n return segmentsInfo;\n }\n\n const segments = segmentsFn(segmentAttributes, segmentInfo.segmentTimeline); // The @duration attribute will be used to determin the playlist's targetDuration which\n // must be in seconds. 
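// The segmentsFromList guard above enforces DASH 5.3.9.2.1: exactly one
// timing source must be present. Spelled out as a predicate:
const outsideOfSpec = (duration, segmentTimeline) =>
  Boolean(!duration && !segmentTimeline || duration && segmentTimeline);
outsideOfSpec(undefined, undefined); // true  -> throws SEGMENT_TIME_UNSPECIFIED
outsideOfSpec(2, undefined);         // false -> parseByDuration path
outsideOfSpec(undefined, [{}]);      // false -> parseByTimeline path
outsideOfSpec(2, [{}]);              // true  -> both defined, throws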
Since we've generated the segment list, we no longer need\n // @duration to be in @timescale units, so we can convert it here.\n\n if (segmentAttributes.duration) {\n const {\n duration,\n timescale = 1\n } = segmentAttributes;\n segmentAttributes.duration = duration / timescale;\n } else if (segments.length) {\n // if there is no @duration attribute, use the largest segment duration as\n // as target duration\n segmentAttributes.duration = segments.reduce((max, segment) => {\n return Math.max(max, Math.ceil(segment.duration));\n }, 0);\n } else {\n segmentAttributes.duration = 0;\n }\n\n segmentsInfo.attributes = segmentAttributes;\n segmentsInfo.segments = segments; // This is a sidx box without actual segment information\n\n if (segmentInfo.base && segmentAttributes.indexRange) {\n segmentsInfo.sidx = segments[0];\n segmentsInfo.segments = [];\n }\n\n return segmentsInfo;\n};\nconst toPlaylists = representations => representations.map(generateSegments);\n\nconst findChildren = (element, name) => from(element.childNodes).filter(({\n tagName\n}) => tagName === name);\nconst getContent = element => element.textContent.trim();\n\n/**\n * Converts the provided string that may contain a division operation to a number.\n *\n * @param {string} value - the provided string value\n *\n * @return {number} the parsed string value\n */\nconst parseDivisionValue = value => {\n return parseFloat(value.split('/').reduce((prev, current) => prev / current));\n};\n\nconst parseDuration = str => {\n const SECONDS_IN_YEAR = 365 * 24 * 60 * 60;\n const SECONDS_IN_MONTH = 30 * 24 * 60 * 60;\n const SECONDS_IN_DAY = 24 * 60 * 60;\n const SECONDS_IN_HOUR = 60 * 60;\n const SECONDS_IN_MIN = 60; // P10Y10M10DT10H10M10.1S\n\n const durationRegex = /P(?:(\\d*)Y)?(?:(\\d*)M)?(?:(\\d*)D)?(?:T(?:(\\d*)H)?(?:(\\d*)M)?(?:([\\d.]*)S)?)?/;\n const match = durationRegex.exec(str);\n\n if (!match) {\n return 0;\n }\n\n const [year, month, day, hour, minute, second] = match.slice(1);\n return parseFloat(year || 0) * SECONDS_IN_YEAR + parseFloat(month || 0) * SECONDS_IN_MONTH + parseFloat(day || 0) * SECONDS_IN_DAY + parseFloat(hour || 0) * SECONDS_IN_HOUR + parseFloat(minute || 0) * SECONDS_IN_MIN + parseFloat(second || 0);\n};\nconst parseDate = str => {\n // Date format without timezone according to ISO 8601\n // YYY-MM-DDThh:mm:ss.ssssss\n const dateRegex = /^\\d+-\\d+-\\d+T\\d+:\\d+:\\d+(\\.\\d+)?$/; // If the date string does not specifiy a timezone, we must specifiy UTC. This is\n // expressed by ending with 'Z'\n\n if (dateRegex.test(str)) {\n str += 'Z';\n }\n\n return Date.parse(str);\n};\n\nconst parsers = {\n /**\n * Specifies the duration of the entire Media Presentation. Format is a duration string\n * as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n mediaPresentationDuration(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the Segment availability start time for all Segments referred to in this\n * MPD. For a dynamic manifest, it specifies the anchor for the earliest availability\n * time. Format is a date string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The date as seconds from unix epoch\n */\n availabilityStartTime(value) {\n return parseDate(value) / 1000;\n },\n\n /**\n * Specifies the smallest period between potential changes to the MPD. 
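// parseDuration above uses fixed approximations for calendar units
// (30-day months, 365-day years), so for 'P1DT2H3M4.5S':
const durationSeconds =
  1 * 24 * 60 * 60 +  // days
  2 * 60 * 60 +       // hours
  3 * 60 +            // minutes
  4.5;                // seconds -> 93784.5
// parseDate appends 'Z' to timezone-less ISO 8601 strings, so
// '2024-01-01T00:00:00' is parsed as UTC rather than local time.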
Format is a\n * duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n minimumUpdatePeriod(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the suggested presentation delay. Format is a\n * duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n suggestedPresentationDelay(value) {\n return parseDuration(value);\n },\n\n /**\n * specifices the type of mpd. Can be either \"static\" or \"dynamic\"\n *\n * @param {string} value\n * value of attribute as a string\n *\n * @return {string}\n * The type as a string\n */\n type(value) {\n return value;\n },\n\n /**\n * Specifies the duration of the smallest time shifting buffer for any Representation\n * in the MPD. Format is a duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n timeShiftBufferDepth(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the PeriodStart time of the Period relative to the availabilityStarttime.\n * Format is a duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n start(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the width of the visual presentation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed width\n */\n width(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the height of the visual presentation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed height\n */\n height(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the bitrate of the representation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed bandwidth\n */\n bandwidth(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the frame rate of the representation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed frame rate\n */\n frameRate(value) {\n return parseDivisionValue(value);\n },\n\n /**\n * Specifies the number of the first Media Segment in this Representation in the Period\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed number\n */\n startNumber(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the timescale in units per seconds\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed timescale\n */\n timescale(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the presentationTimeOffset.\n *\n * @param {string} value\n * value of the attribute as a string\n *\n * @return {number}\n * The parsed presentationTimeOffset\n */\n presentationTimeOffset(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the constant approximate Segment duration\n * NOTE: The element also contains an @duration attribute. This duration\n * specifies the duration of the Period. 
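// frameRate is parsed with parseDivisionValue, which folds '/' left to
// right, so MPD values like '30000/1001' become fractional rates:
parseDivisionValue('30000/1001'); // ~29.97002997
parseDivisionValue('25');         // 25 (no division present)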
This attribute is currently not\n * supported by the rest of the parser, however we still check for it to prevent\n * errors.\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed duration\n */\n duration(value) {\n const parsedValue = parseInt(value, 10);\n\n if (isNaN(parsedValue)) {\n return parseDuration(value);\n }\n\n return parsedValue;\n },\n\n /**\n * Specifies the Segment duration, in units of the value of the @timescale.\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed duration\n */\n d(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the MPD start time, in @timescale units, the first Segment in the series\n * starts relative to the beginning of the Period\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed time\n */\n t(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the repeat count of the number of following contiguous Segments with the\n * same duration expressed by the value of @d\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed number\n */\n r(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the presentationTime.\n *\n * @param {string} value\n * value of the attribute as a string\n *\n * @return {number}\n * The parsed presentationTime\n */\n presentationTime(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Default parser for all other attributes. Acts as a no-op and just returns the value\n * as a string\n *\n * @param {string} value\n * value of attribute as a string\n * @return {string}\n * Unparsed value\n */\n DEFAULT(value) {\n return value;\n }\n\n};\n/**\n * Gets all the attributes and values of the provided node, parses attributes with known\n * types, and returns an object with attribute names mapped to values.\n *\n * @param {Node} el\n * The node to parse attributes from\n * @return {Object}\n * Object with all attributes of el parsed\n */\n\nconst parseAttributes = el => {\n if (!(el && el.attributes)) {\n return {};\n }\n\n return from(el.attributes).reduce((a, e) => {\n const parseFn = parsers[e.name] || parsers.DEFAULT;\n a[e.name] = parseFn(e.value);\n return a;\n }, {});\n};\n\nconst keySystemsMap = {\n 'urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b': 'org.w3.clearkey',\n 'urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed': 'com.widevine.alpha',\n 'urn:uuid:9a04f079-9840-4286-ab92-e65be0885f95': 'com.microsoft.playready',\n 'urn:uuid:f239e769-efa3-4850-9c16-a903c6932efb': 'com.adobe.primetime',\n // ISO_IEC 23009-1_2022 5.8.5.2.2 The mp4 Protection Scheme\n 'urn:mpeg:dash:mp4protection:2011': 'mp4protection'\n};\n/**\n * Builds a list of urls that is the product of the reference urls and BaseURL values\n *\n * @param {Object[]} references\n * List of objects containing the reference URL as well as its attributes\n * @param {Node[]} baseUrlElements\n * List of BaseURL nodes from the mpd\n * @return {Object[]}\n * List of objects with resolved urls and attributes\n */\n\nconst buildBaseUrls = (references, baseUrlElements) => {\n if (!baseUrlElements.length) {\n return references;\n }\n\n return flatten(references.map(function (reference) {\n return baseUrlElements.map(function (baseUrlElement) {\n const initialBaseUrl = getContent(baseUrlElement);\n const resolvedBaseUrl = resolveUrl(reference.baseUrl, initialBaseUrl);\n const finalBaseUrl = merge(parseAttributes(baseUrlElement), {\n baseUrl: 
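// buildBaseUrls above is a cross product: every reference URL is combined
// with every <BaseURL> value. A sketch with a stand-in resolver (the real
// code uses resolveUrl from vhs-utils and flatten rather than flatMap):
const resolveSketch = (base, rel) => new URL(rel, base).href;
const referenceUrls = [{ baseUrl: 'https://a.example/mpd/' }];
const baseUrlContents = ['media/', 'https://cdn.example/media/'];
const resolvedUrls = referenceUrls.flatMap(ref =>
  baseUrlContents.map(content => ({ baseUrl: resolveSketch(ref.baseUrl, content) })));
// -> [{ baseUrl: 'https://a.example/mpd/media/' },
//     { baseUrl: 'https://cdn.example/media/' }]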
resolvedBaseUrl\n }); // If the URL is resolved, we want to get the serviceLocation from the reference\n // assuming there is no serviceLocation on the initialBaseUrl\n\n if (resolvedBaseUrl !== initialBaseUrl && !finalBaseUrl.serviceLocation && reference.serviceLocation) {\n finalBaseUrl.serviceLocation = reference.serviceLocation;\n }\n\n return finalBaseUrl;\n });\n }));\n};\n/**\n * Contains all Segment information for its containing AdaptationSet\n *\n * @typedef {Object} SegmentInformation\n * @property {Object|undefined} template\n * Contains the attributes for the SegmentTemplate node\n * @property {Object[]|undefined} segmentTimeline\n * Contains a list of atrributes for each S node within the SegmentTimeline node\n * @property {Object|undefined} list\n * Contains the attributes for the SegmentList node\n * @property {Object|undefined} base\n * Contains the attributes for the SegmentBase node\n */\n\n/**\n * Returns all available Segment information contained within the AdaptationSet node\n *\n * @param {Node} adaptationSet\n * The AdaptationSet node to get Segment information from\n * @return {SegmentInformation}\n * The Segment information contained within the provided AdaptationSet\n */\n\nconst getSegmentInformation = adaptationSet => {\n const segmentTemplate = findChildren(adaptationSet, 'SegmentTemplate')[0];\n const segmentList = findChildren(adaptationSet, 'SegmentList')[0];\n const segmentUrls = segmentList && findChildren(segmentList, 'SegmentURL').map(s => merge({\n tag: 'SegmentURL'\n }, parseAttributes(s)));\n const segmentBase = findChildren(adaptationSet, 'SegmentBase')[0];\n const segmentTimelineParentNode = segmentList || segmentTemplate;\n const segmentTimeline = segmentTimelineParentNode && findChildren(segmentTimelineParentNode, 'SegmentTimeline')[0];\n const segmentInitializationParentNode = segmentList || segmentBase || segmentTemplate;\n const segmentInitialization = segmentInitializationParentNode && findChildren(segmentInitializationParentNode, 'Initialization')[0]; // SegmentTemplate is handled slightly differently, since it can have both\n // @initialization and an node. @initialization can be templated,\n // while the node can have a url and range specified. If the has\n // both @initialization and an subelement we opt to override with\n // the node, as this interaction is not defined in the spec.\n\n const template = segmentTemplate && parseAttributes(segmentTemplate);\n\n if (template && segmentInitialization) {\n template.initialization = segmentInitialization && parseAttributes(segmentInitialization);\n } else if (template && template.initialization) {\n // If it is @initialization we convert it to an object since this is the format that\n // later functions will rely on for the initialization segment. 
This is only valid\n // for \n template.initialization = {\n sourceURL: template.initialization\n };\n }\n\n const segmentInfo = {\n template,\n segmentTimeline: segmentTimeline && findChildren(segmentTimeline, 'S').map(s => parseAttributes(s)),\n list: segmentList && merge(parseAttributes(segmentList), {\n segmentUrls,\n initialization: parseAttributes(segmentInitialization)\n }),\n base: segmentBase && merge(parseAttributes(segmentBase), {\n initialization: parseAttributes(segmentInitialization)\n })\n };\n Object.keys(segmentInfo).forEach(key => {\n if (!segmentInfo[key]) {\n delete segmentInfo[key];\n }\n });\n return segmentInfo;\n};\n/**\n * Contains Segment information and attributes needed to construct a Playlist object\n * from a Representation\n *\n * @typedef {Object} RepresentationInformation\n * @property {SegmentInformation} segmentInfo\n * Segment information for this Representation\n * @property {Object} attributes\n * Inherited attributes for this Representation\n */\n\n/**\n * Maps a Representation node to an object containing Segment information and attributes\n *\n * @name inheritBaseUrlsCallback\n * @function\n * @param {Node} representation\n * Representation node from the mpd\n * @return {RepresentationInformation}\n * Representation information needed to construct a Playlist object\n */\n\n/**\n * Returns a callback for Array.prototype.map for mapping Representation nodes to\n * Segment information and attributes using inherited BaseURL nodes.\n *\n * @param {Object} adaptationSetAttributes\n * Contains attributes inherited by the AdaptationSet\n * @param {Object[]} adaptationSetBaseUrls\n * List of objects containing resolved base URLs and attributes\n * inherited by the AdaptationSet\n * @param {SegmentInformation} adaptationSetSegmentInfo\n * Contains Segment information for the AdaptationSet\n * @return {inheritBaseUrlsCallback}\n * Callback map function\n */\n\nconst inheritBaseUrls = (adaptationSetAttributes, adaptationSetBaseUrls, adaptationSetSegmentInfo) => representation => {\n const repBaseUrlElements = findChildren(representation, 'BaseURL');\n const repBaseUrls = buildBaseUrls(adaptationSetBaseUrls, repBaseUrlElements);\n const attributes = merge(adaptationSetAttributes, parseAttributes(representation));\n const representationSegmentInfo = getSegmentInformation(representation);\n return repBaseUrls.map(baseUrl => {\n return {\n segmentInfo: merge(adaptationSetSegmentInfo, representationSegmentInfo),\n attributes: merge(attributes, baseUrl)\n };\n });\n};\n/**\n * Tranforms a series of content protection nodes to\n * an object containing pssh data by key system\n *\n * @param {Node[]} contentProtectionNodes\n * Content protection nodes\n * @return {Object}\n * Object containing pssh data by key system\n */\n\nconst generateKeySystemInformation = contentProtectionNodes => {\n return contentProtectionNodes.reduce((acc, node) => {\n const attributes = parseAttributes(node); // Although it could be argued that according to the UUID RFC spec the UUID string (a-f chars) should be generated\n // as a lowercase string it also mentions it should be treated as case-insensitive on input. 
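// The @initialization normalization near the top of getSegmentInformation
// above, in isolation: a templated attribute string is wrapped so later
// code can treat it like a parsed <Initialization> node.
let templateSketch = { initialization: '$RepresentationID$-init.mp4' };
if (templateSketch && typeof templateSketch.initialization === 'string') {
  templateSketch.initialization = { sourceURL: templateSketch.initialization };
}
// -> { initialization: { sourceURL: '$RepresentationID$-init.mp4' } }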
Since the key system\n // UUIDs in the keySystemsMap are hardcoded as lowercase in the codebase there isn't any reason not to do\n // .toLowerCase() on the input UUID string from the manifest (at least I could not think of one).\n\n if (attributes.schemeIdUri) {\n attributes.schemeIdUri = attributes.schemeIdUri.toLowerCase();\n }\n\n const keySystem = keySystemsMap[attributes.schemeIdUri];\n\n if (keySystem) {\n acc[keySystem] = {\n attributes\n };\n const psshNode = findChildren(node, 'cenc:pssh')[0];\n\n if (psshNode) {\n const pssh = getContent(psshNode);\n acc[keySystem].pssh = pssh && decodeB64ToUint8Array(pssh);\n }\n }\n\n return acc;\n }, {});\n}; // defined in ANSI_SCTE 214-1 2016\n\n\nconst parseCaptionServiceMetadata = service => {\n // 608 captions\n if (service.schemeIdUri === 'urn:scte:dash:cc:cea-608:2015') {\n const values = typeof service.value !== 'string' ? [] : service.value.split(';');\n return values.map(value => {\n let channel;\n let language; // default language to value\n\n language = value;\n\n if (/^CC\\d=/.test(value)) {\n [channel, language] = value.split('=');\n } else if (/^CC\\d$/.test(value)) {\n channel = value;\n }\n\n return {\n channel,\n language\n };\n });\n } else if (service.schemeIdUri === 'urn:scte:dash:cc:cea-708:2015') {\n const values = typeof service.value !== 'string' ? [] : service.value.split(';');\n return values.map(value => {\n const flags = {\n // service or channel number 1-63\n 'channel': undefined,\n // language is a 3ALPHA per ISO 639.2/B\n // field is required\n 'language': undefined,\n // BIT 1/0 or ?\n // default value is 1, meaning 16:9 aspect ratio, 0 is 4:3, ? is unknown\n 'aspectRatio': 1,\n // BIT 1/0\n // easy reader flag indicated the text is tailed to the needs of beginning readers\n // default 0, or off\n 'easyReader': 0,\n // BIT 1/0\n // If 3d metadata is present (CEA-708.1) then 1\n // default 0\n '3D': 0\n };\n\n if (/=/.test(value)) {\n const [channel, opts = ''] = value.split('=');\n flags.channel = channel;\n flags.language = value;\n opts.split(',').forEach(opt => {\n const [name, val] = opt.split(':');\n\n if (name === 'lang') {\n flags.language = val; // er for easyReadery\n } else if (name === 'er') {\n flags.easyReader = Number(val); // war for wide aspect ratio\n } else if (name === 'war') {\n flags.aspectRatio = Number(val);\n } else if (name === '3D') {\n flags['3D'] = Number(val);\n }\n });\n } else {\n flags.language = value;\n }\n\n if (flags.channel) {\n flags.channel = 'SERVICE' + flags.channel;\n }\n\n return flags;\n });\n }\n};\n/**\n * A map callback that will parse all event stream data for a collection of periods\n * DASH ISO_IEC_23009 5.10.2.2\n * https://dashif-documents.azurewebsites.net/Events/master/event.html#mpd-event-timing\n *\n * @param {PeriodInformation} period object containing necessary period information\n * @return a collection of parsed eventstream event objects\n */\n\nconst toEventStream = period => {\n // get and flatten all EventStreams tags and parse attributes and children\n return flatten(findChildren(period.node, 'EventStream').map(eventStream => {\n const eventStreamAttributes = parseAttributes(eventStream);\n const schemeIdUri = eventStreamAttributes.schemeIdUri; // find all Events per EventStream tag and map to return objects\n\n return findChildren(eventStream, 'Event').map(event => {\n const eventAttributes = parseAttributes(event);\n const presentationTime = eventAttributes.presentationTime || 0;\n const timescale = eventStreamAttributes.timescale || 1;\n const 
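// Example inputs/outputs for parseCaptionServiceMetadata above:
// CEA-608, value 'CC1=eng;CC3=swe'
//   -> [{ channel: 'CC1', language: 'eng' },
//       { channel: 'CC3', language: 'swe' }]
// CEA-708, value '1=lang:eng,war:0'
//   -> [{ channel: 'SERVICE1', language: 'eng', aspectRatio: 0,
//         easyReader: 0, '3D': 0 }]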
duration = eventAttributes.duration || 0;\n const start = presentationTime / timescale + period.attributes.start;\n return {\n schemeIdUri,\n value: eventStreamAttributes.value,\n id: eventAttributes.id,\n start,\n end: start + duration / timescale,\n messageData: getContent(event) || eventAttributes.messageData,\n contentEncoding: eventStreamAttributes.contentEncoding,\n presentationTimeOffset: eventStreamAttributes.presentationTimeOffset || 0\n };\n });\n }));\n};\n/**\n * Maps an AdaptationSet node to a list of Representation information objects\n *\n * @name toRepresentationsCallback\n * @function\n * @param {Node} adaptationSet\n * AdaptationSet node from the mpd\n * @return {RepresentationInformation[]}\n * List of objects containing Representaion information\n */\n\n/**\n * Returns a callback for Array.prototype.map for mapping AdaptationSet nodes to a list of\n * Representation information objects\n *\n * @param {Object} periodAttributes\n * Contains attributes inherited by the Period\n * @param {Object[]} periodBaseUrls\n * Contains list of objects with resolved base urls and attributes\n * inherited by the Period\n * @param {string[]} periodSegmentInfo\n * Contains Segment Information at the period level\n * @return {toRepresentationsCallback}\n * Callback map function\n */\n\nconst toRepresentations = (periodAttributes, periodBaseUrls, periodSegmentInfo) => adaptationSet => {\n const adaptationSetAttributes = parseAttributes(adaptationSet);\n const adaptationSetBaseUrls = buildBaseUrls(periodBaseUrls, findChildren(adaptationSet, 'BaseURL'));\n const role = findChildren(adaptationSet, 'Role')[0];\n const roleAttributes = {\n role: parseAttributes(role)\n };\n let attrs = merge(periodAttributes, adaptationSetAttributes, roleAttributes);\n const accessibility = findChildren(adaptationSet, 'Accessibility')[0];\n const captionServices = parseCaptionServiceMetadata(parseAttributes(accessibility));\n\n if (captionServices) {\n attrs = merge(attrs, {\n captionServices\n });\n }\n\n const label = findChildren(adaptationSet, 'Label')[0];\n\n if (label && label.childNodes.length) {\n const labelVal = label.childNodes[0].nodeValue.trim();\n attrs = merge(attrs, {\n label: labelVal\n });\n }\n\n const contentProtection = generateKeySystemInformation(findChildren(adaptationSet, 'ContentProtection'));\n\n if (Object.keys(contentProtection).length) {\n attrs = merge(attrs, {\n contentProtection\n });\n }\n\n const segmentInfo = getSegmentInformation(adaptationSet);\n const representations = findChildren(adaptationSet, 'Representation');\n const adaptationSetSegmentInfo = merge(periodSegmentInfo, segmentInfo);\n return flatten(representations.map(inheritBaseUrls(attrs, adaptationSetBaseUrls, adaptationSetSegmentInfo)));\n};\n/**\n * Contains all period information for mapping nodes onto adaptation sets.\n *\n * @typedef {Object} PeriodInformation\n * @property {Node} period.node\n * Period node from the mpd\n * @property {Object} period.attributes\n * Parsed period attributes from node plus any added\n */\n\n/**\n * Maps a PeriodInformation object to a list of Representation information objects for all\n * AdaptationSet nodes contained within the Period.\n *\n * @name toAdaptationSetsCallback\n * @function\n * @param {PeriodInformation} period\n * Period object containing necessary period information\n * @param {number} periodStart\n * Start time of the Period within the mpd\n * @return {RepresentationInformation[]}\n * List of objects containing Representaion information\n */\n\n/**\n * Returns 
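// Worked numbers (illustrative values) for the EventStream timing computed
// in toEventStream above:
const periodStartSec = 5;             // period.attributes.start, seconds
const eventTimescale = 1000;          // ticks per second (defaults to 1)
const eventPresentationTime = 15000;  // ticks (defaults to 0)
const eventDuration = 2000;           // ticks (defaults to 0)
const eventStart = eventPresentationTime / eventTimescale + periodStartSec; // 20s
const eventEnd = eventStart + eventDuration / eventTimescale;               // 22s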
a callback for Array.prototype.map for mapping Period nodes to a list of\n * Representation information objects\n *\n * @param {Object} mpdAttributes\n * Contains attributes inherited by the mpd\n * @param {Object[]} mpdBaseUrls\n * Contains list of objects with resolved base urls and attributes\n * inherited by the mpd\n * @return {toAdaptationSetsCallback}\n * Callback map function\n */\n\nconst toAdaptationSets = (mpdAttributes, mpdBaseUrls) => (period, index) => {\n const periodBaseUrls = buildBaseUrls(mpdBaseUrls, findChildren(period.node, 'BaseURL'));\n const periodAttributes = merge(mpdAttributes, {\n periodStart: period.attributes.start\n });\n\n if (typeof period.attributes.duration === 'number') {\n periodAttributes.periodDuration = period.attributes.duration;\n }\n\n const adaptationSets = findChildren(period.node, 'AdaptationSet');\n const periodSegmentInfo = getSegmentInformation(period.node);\n return flatten(adaptationSets.map(toRepresentations(periodAttributes, periodBaseUrls, periodSegmentInfo)));\n};\n/**\n * Tranforms an array of content steering nodes into an object\n * containing CDN content steering information from the MPD manifest.\n *\n * For more information on the DASH spec for Content Steering parsing, see:\n * https://dashif.org/docs/DASH-IF-CTS-00XX-Content-Steering-Community-Review.pdf\n *\n * @param {Node[]} contentSteeringNodes\n * Content steering nodes\n * @param {Function} eventHandler\n * The event handler passed into the parser options to handle warnings\n * @return {Object}\n * Object containing content steering data\n */\n\nconst generateContentSteeringInformation = (contentSteeringNodes, eventHandler) => {\n // If there are more than one ContentSteering tags, throw an error\n if (contentSteeringNodes.length > 1) {\n eventHandler({\n type: 'warn',\n message: 'The MPD manifest should contain no more than one ContentSteering tag'\n });\n } // Return a null value if there are no ContentSteering tags\n\n\n if (!contentSteeringNodes.length) {\n return null;\n }\n\n const infoFromContentSteeringTag = merge({\n serverURL: getContent(contentSteeringNodes[0])\n }, parseAttributes(contentSteeringNodes[0])); // Converts `queryBeforeStart` to a boolean, as well as setting the default value\n // to `false` if it doesn't exist\n\n infoFromContentSteeringTag.queryBeforeStart = infoFromContentSteeringTag.queryBeforeStart === 'true';\n return infoFromContentSteeringTag;\n};\n/**\n * Gets Period@start property for a given period.\n *\n * @param {Object} options\n * Options object\n * @param {Object} options.attributes\n * Period attributes\n * @param {Object} [options.priorPeriodAttributes]\n * Prior period attributes (if prior period is available)\n * @param {string} options.mpdType\n * The MPD@type these periods came from\n * @return {number|null}\n * The period start, or null if it's an early available period or error\n */\n\nconst getPeriodStart = ({\n attributes,\n priorPeriodAttributes,\n mpdType\n}) => {\n // Summary of period start time calculation from DASH spec section 5.3.2.1\n //\n // A period's start is the first period's start + time elapsed after playing all\n // prior periods to this one. Periods continue one after the other in time (without\n // gaps) until the end of the presentation.\n //\n // The value of Period@start should be:\n // 1. if Period@start is present: value of Period@start\n // 2. if previous period exists and it has @duration: previous Period@start +\n // previous Period@duration\n // 3. 
if this is first period and MPD@type is 'static': 0\n // 4. in all other cases, consider the period an \"early available period\" (note: not\n // currently supported)\n // (1)\n if (typeof attributes.start === 'number') {\n return attributes.start;\n } // (2)\n\n\n if (priorPeriodAttributes && typeof priorPeriodAttributes.start === 'number' && typeof priorPeriodAttributes.duration === 'number') {\n return priorPeriodAttributes.start + priorPeriodAttributes.duration;\n } // (3)\n\n\n if (!priorPeriodAttributes && mpdType === 'static') {\n return 0;\n } // (4)\n // There is currently no logic for calculating the Period@start value if there is\n // no Period@start or prior Period@start and Period@duration available. This is not made\n // explicit by the DASH interop guidelines or the DASH spec, however, since there's\n // nothing about any other resolution strategies, it's implied. Thus, this case should\n // be considered an early available period, or error, and null should suffice for both\n // of those cases.\n\n\n return null;\n};\n/**\n * Traverses the mpd xml tree to generate a list of Representation information objects\n * that have inherited attributes from parent nodes\n *\n * @param {Node} mpd\n * The root node of the mpd\n * @param {Object} options\n * Available options for inheritAttributes\n * @param {string} options.manifestUri\n * The uri source of the mpd\n * @param {number} options.NOW\n * Current time per DASH IOP. Default is current time in ms since epoch\n * @param {number} options.clientOffset\n * Client time difference from NOW (in milliseconds)\n * @return {RepresentationInformation[]}\n * List of objects containing Representation information\n */\n\nconst inheritAttributes = (mpd, options = {}) => {\n const {\n manifestUri = '',\n NOW = Date.now(),\n clientOffset = 0,\n // TODO: For now, we are expecting an eventHandler callback function\n // to be passed into the mpd parser as an option.\n // In the future, we should enable stream parsing by using the Stream class from vhs-utils.\n // This will support new features including a standardized event handler.\n // See the m3u8 parser for examples of how stream parsing is currently used for HLS parsing.\n // https://github.com/videojs/vhs-utils/blob/88d6e10c631e57a5af02c5a62bc7376cd456b4f5/src/stream.js#L9\n eventHandler = function () {}\n } = options;\n const periodNodes = findChildren(mpd, 'Period');\n\n if (!periodNodes.length) {\n throw new Error(errors.INVALID_NUMBER_OF_PERIOD);\n }\n\n const locations = findChildren(mpd, 'Location');\n const mpdAttributes = parseAttributes(mpd);\n const mpdBaseUrls = buildBaseUrls([{\n baseUrl: manifestUri\n }], findChildren(mpd, 'BaseURL'));\n const contentSteeringNodes = findChildren(mpd, 'ContentSteering'); // See DASH spec section 5.3.1.2, Semantics of MPD element. Default type to 'static'.\n\n mpdAttributes.type = mpdAttributes.type || 'static';\n mpdAttributes.sourceDuration = mpdAttributes.mediaPresentationDuration || 0;\n mpdAttributes.NOW = NOW;\n mpdAttributes.clientOffset = clientOffset;\n\n if (locations.length) {\n mpdAttributes.locations = locations.map(getContent);\n }\n\n const periods = []; // Since toAdaptationSets acts on individual periods right now, the simplest approach to\n // adding properties that require looking at prior periods is to parse attributes and add\n // missing ones before toAdaptationSets is called. 
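// Usage sketch of getPeriodStart's resolution order above:
getPeriodStart({ attributes: { start: 30 }, mpdType: 'dynamic' });  // 30 (rule 1)
getPeriodStart({
  attributes: {},
  priorPeriodAttributes: { start: 0, duration: 30 },
  mpdType: 'dynamic'
});                                                                 // 30 (rule 2)
getPeriodStart({ attributes: {}, mpdType: 'static' });              // 0 (rule 3)
getPeriodStart({ attributes: {}, mpdType: 'dynamic' });             // null (rule 4)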
If more such properties are added, it\n // may be better to refactor toAdaptationSets.\n\n periodNodes.forEach((node, index) => {\n const attributes = parseAttributes(node); // Use the last modified prior period, as it may contain added information necessary\n // for this period.\n\n const priorPeriod = periods[index - 1];\n attributes.start = getPeriodStart({\n attributes,\n priorPeriodAttributes: priorPeriod ? priorPeriod.attributes : null,\n mpdType: mpdAttributes.type\n });\n periods.push({\n node,\n attributes\n });\n });\n return {\n locations: mpdAttributes.locations,\n contentSteeringInfo: generateContentSteeringInformation(contentSteeringNodes, eventHandler),\n // TODO: There are occurences where this `representationInfo` array contains undesired\n // duplicates. This generally occurs when there are multiple BaseURL nodes that are\n // direct children of the MPD node. When we attempt to resolve URLs from a combination of the\n // parent BaseURL and a child BaseURL, and the value does not resolve,\n // we end up returning the child BaseURL multiple times.\n // We need to determine a way to remove these duplicates in a safe way.\n // See: https://github.com/videojs/mpd-parser/pull/17#discussion_r162750527\n representationInfo: flatten(periods.map(toAdaptationSets(mpdAttributes, mpdBaseUrls))),\n eventStream: flatten(periods.map(toEventStream))\n };\n};\n\nconst stringToMpdXml = manifestString => {\n if (manifestString === '') {\n throw new Error(errors.DASH_EMPTY_MANIFEST);\n }\n\n const parser = new DOMParser();\n let xml;\n let mpd;\n\n try {\n xml = parser.parseFromString(manifestString, 'application/xml');\n mpd = xml && xml.documentElement.tagName === 'MPD' ? xml.documentElement : null;\n } catch (e) {// ie 11 throws on invalid xml\n }\n\n if (!mpd || mpd && mpd.getElementsByTagName('parsererror').length > 0) {\n throw new Error(errors.DASH_INVALID_XML);\n }\n\n return mpd;\n};\n\n/**\n * Parses the manifest for a UTCTiming node, returning the nodes attributes if found\n *\n * @param {string} mpd\n * XML string of the MPD manifest\n * @return {Object|null}\n * Attributes of UTCTiming node specified in the manifest. 
Null if none found\n */\n\nconst parseUTCTimingScheme = mpd => {\n const UTCTimingNode = findChildren(mpd, 'UTCTiming')[0];\n\n if (!UTCTimingNode) {\n return null;\n }\n\n const attributes = parseAttributes(UTCTimingNode);\n\n switch (attributes.schemeIdUri) {\n case 'urn:mpeg:dash:utc:http-head:2014':\n case 'urn:mpeg:dash:utc:http-head:2012':\n attributes.method = 'HEAD';\n break;\n\n case 'urn:mpeg:dash:utc:http-xsdate:2014':\n case 'urn:mpeg:dash:utc:http-iso:2014':\n case 'urn:mpeg:dash:utc:http-xsdate:2012':\n case 'urn:mpeg:dash:utc:http-iso:2012':\n attributes.method = 'GET';\n break;\n\n case 'urn:mpeg:dash:utc:direct:2014':\n case 'urn:mpeg:dash:utc:direct:2012':\n attributes.method = 'DIRECT';\n attributes.value = Date.parse(attributes.value);\n break;\n\n case 'urn:mpeg:dash:utc:http-ntp:2014':\n case 'urn:mpeg:dash:utc:ntp:2014':\n case 'urn:mpeg:dash:utc:sntp:2014':\n default:\n throw new Error(errors.UNSUPPORTED_UTC_TIMING_SCHEME);\n }\n\n return attributes;\n};\n\nconst VERSION = version;\n/*\n * Given a DASH manifest string and options, parses the DASH manifest into an object in the\n * form outputed by m3u8-parser and accepted by videojs/http-streaming.\n *\n * For live DASH manifests, if `previousManifest` is provided in options, then the newly\n * parsed DASH manifest will have its media sequence and discontinuity sequence values\n * updated to reflect its position relative to the prior manifest.\n *\n * @param {string} manifestString - the DASH manifest as a string\n * @param {options} [options] - any options\n *\n * @return {Object} the manifest object\n */\n\nconst parse = (manifestString, options = {}) => {\n const parsedManifestInfo = inheritAttributes(stringToMpdXml(manifestString), options);\n const playlists = toPlaylists(parsedManifestInfo.representationInfo);\n return toM3u8({\n dashPlaylists: playlists,\n locations: parsedManifestInfo.locations,\n contentSteering: parsedManifestInfo.contentSteeringInfo,\n sidxMapping: options.sidxMapping,\n previousManifest: options.previousManifest,\n eventStream: parsedManifestInfo.eventStream\n });\n};\n/**\n * Parses the manifest for a UTCTiming node, returning the nodes attributes if found\n *\n * @param {string} manifestString\n * XML string of the MPD manifest\n * @return {Object|null}\n * Attributes of UTCTiming node specified in the manifest. Null if none found\n */\n\n\nconst parseUTCTiming = manifestString => parseUTCTimingScheme(stringToMpdXml(manifestString));\n\nexport { VERSION, addSidxSegmentsToPlaylist$1 as addSidxSegmentsToPlaylist, generateSidxKey, inheritAttributes, parse, parseUTCTiming, stringToMpdXml, toM3u8, toPlaylists };\n", "/**\n * Loops through all supported media groups in master and calls the provided\n * callback for each group\n *\n * @param {Object} master\n * The parsed master manifest object\n * @param {string[]} groups\n * The media groups to call the callback for\n * @param {Function} callback\n * Callback to call for each media group\n */\nexport var forEachMediaGroup = function forEachMediaGroup(master, groups, callback) {\n groups.forEach(function (mediaType) {\n for (var groupKey in master.mediaGroups[mediaType]) {\n for (var labelKey in master.mediaGroups[mediaType][groupKey]) {\n var mediaProperties = master.mediaGroups[mediaType][groupKey][labelKey];\n callback(mediaProperties, mediaType, groupKey, labelKey);\n }\n }\n });\n};", "import window from 'global/window';\n\nvar atob = function atob(s) {\n return window.atob ? 
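// Usage sketch for forEachMediaGroup above, with a minimal master object:
const masterSketch = {
  mediaGroups: {
    AUDIO: { main: { en: { language: 'en' }, fr: { language: 'fr' } } },
    SUBTITLES: {}
  }
};
forEachMediaGroup(masterSketch, ['AUDIO', 'SUBTITLES'], (props, type, group, label) => {
  console.log(type, group, label, props.language);
});
// AUDIO main en en
// AUDIO main fr fr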
window.atob(s) : Buffer.from(s, 'base64').toString('binary');\n};\n\nexport default function decodeB64ToUint8Array(b64Text) {\n var decodedString = atob(b64Text);\n var array = new Uint8Array(decodedString.length);\n\n for (var i = 0; i < decodedString.length; i++) {\n array[i] = decodedString.charCodeAt(i);\n }\n\n return array;\n}", "import { toUint8, bytesMatch } from './byte-helpers.js';\nvar ID3 = toUint8([0x49, 0x44, 0x33]);\nexport var getId3Size = function getId3Size(bytes, offset) {\n if (offset === void 0) {\n offset = 0;\n }\n\n bytes = toUint8(bytes);\n var flags = bytes[offset + 5];\n var returnSize = bytes[offset + 6] << 21 | bytes[offset + 7] << 14 | bytes[offset + 8] << 7 | bytes[offset + 9];\n var footerPresent = (flags & 16) >> 4;\n\n if (footerPresent) {\n return returnSize + 20;\n }\n\n return returnSize + 10;\n};\nexport var getId3Offset = function getId3Offset(bytes, offset) {\n if (offset === void 0) {\n offset = 0;\n }\n\n bytes = toUint8(bytes);\n\n if (bytes.length - offset < 10 || !bytesMatch(bytes, ID3, {\n offset: offset\n })) {\n return offset;\n }\n\n offset += getId3Size(bytes, offset); // recursive check for id3 tags as some files\n // have multiple ID3 tag sections even though\n // they should not.\n\n return getId3Offset(bytes, offset);\n};", "export var OPUS_HEAD = new Uint8Array([// O, p, u, s\n0x4f, 0x70, 0x75, 0x73, // H, e, a, d\n0x48, 0x65, 0x61, 0x64]); // https://wiki.xiph.org/OggOpus\n// https://vfrmaniac.fushizen.eu/contents/opus_in_isobmff.html\n// https://opus-codec.org/docs/opusfile_api-0.7/structOpusHead.html\n\nexport var parseOpusHead = function parseOpusHead(bytes) {\n var view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);\n var version = view.getUint8(0); // version 0, from mp4, does not use littleEndian.\n\n var littleEndian = version !== 0;\n var config = {\n version: version,\n channels: view.getUint8(1),\n preSkip: view.getUint16(2, littleEndian),\n sampleRate: view.getUint32(4, littleEndian),\n outputGain: view.getUint16(8, littleEndian),\n channelMappingFamily: view.getUint8(10)\n };\n\n if (config.channelMappingFamily > 0 && bytes.length > 10) {\n config.streamCount = view.getUint8(11);\n config.twoChannelStreamCount = view.getUint8(12);\n config.channelMapping = [];\n\n for (var c = 0; c < config.channels; c++) {\n config.channelMapping.push(view.getUint8(13 + c));\n }\n }\n\n return config;\n};\nexport var setOpusHead = function setOpusHead(config) {\n var size = config.channelMappingFamily <= 0 ? 
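// The ID3v2 size read by getId3Size above is "syncsafe": four bytes with
// 7 significant bits each. For size bytes 0x00 0x00 0x02 0x01:
const id3Size = 0x00 << 21 | 0x00 << 14 | 0x02 << 7 | 0x01; // 257
// +10 for the header (or +20 when the footer flag is set) -> 267 total,
// which getId3Offset skips, recursing in case of stacked ID3 tags.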
11 : 12 + config.channels;\n var view = new DataView(new ArrayBuffer(size));\n var littleEndian = config.version !== 0;\n view.setUint8(0, config.version);\n view.setUint8(1, config.channels);\n view.setUint16(2, config.preSkip, littleEndian);\n view.setUint32(4, config.sampleRate, littleEndian);\n view.setUint16(8, config.outputGain, littleEndian);\n view.setUint8(10, config.channelMappingFamily);\n\n if (config.channelMappingFamily > 0) {\n view.setUint8(11, config.streamCount);\n config.channelMapping.foreach(function (cm, i) {\n view.setUint8(12 + i, cm);\n });\n }\n\n return new Uint8Array(view.buffer);\n};", "import { stringToBytes, toUint8, bytesMatch, bytesToString, toHexString, padStart, bytesToNumber } from './byte-helpers.js';\nimport { getAvcCodec, getHvcCodec, getAv1Codec } from './codec-helpers.js';\nimport { parseOpusHead } from './opus-helpers.js';\n\nvar normalizePath = function normalizePath(path) {\n if (typeof path === 'string') {\n return stringToBytes(path);\n }\n\n if (typeof path === 'number') {\n return path;\n }\n\n return path;\n};\n\nvar normalizePaths = function normalizePaths(paths) {\n if (!Array.isArray(paths)) {\n return [normalizePath(paths)];\n }\n\n return paths.map(function (p) {\n return normalizePath(p);\n });\n};\n\nvar DESCRIPTORS;\nexport var parseDescriptors = function parseDescriptors(bytes) {\n bytes = toUint8(bytes);\n var results = [];\n var i = 0;\n\n while (bytes.length > i) {\n var tag = bytes[i];\n var size = 0;\n var headerSize = 0; // tag\n\n headerSize++;\n var byte = bytes[headerSize]; // first byte\n\n headerSize++;\n\n while (byte & 0x80) {\n size = (byte & 0x7F) << 7;\n byte = bytes[headerSize];\n headerSize++;\n }\n\n size += byte & 0x7F;\n\n for (var z = 0; z < DESCRIPTORS.length; z++) {\n var _DESCRIPTORS$z = DESCRIPTORS[z],\n id = _DESCRIPTORS$z.id,\n parser = _DESCRIPTORS$z.parser;\n\n if (tag === id) {\n results.push(parser(bytes.subarray(headerSize, headerSize + size)));\n break;\n }\n }\n\n i += size + headerSize;\n }\n\n return results;\n};\nDESCRIPTORS = [{\n id: 0x03,\n parser: function parser(bytes) {\n var desc = {\n tag: 0x03,\n id: bytes[0] << 8 | bytes[1],\n flags: bytes[2],\n size: 3,\n dependsOnEsId: 0,\n ocrEsId: 0,\n descriptors: [],\n url: ''\n }; // depends on es id\n\n if (desc.flags & 0x80) {\n desc.dependsOnEsId = bytes[desc.size] << 8 | bytes[desc.size + 1];\n desc.size += 2;\n } // url\n\n\n if (desc.flags & 0x40) {\n var len = bytes[desc.size];\n desc.url = bytesToString(bytes.subarray(desc.size + 1, desc.size + 1 + len));\n desc.size += len;\n } // ocr es id\n\n\n if (desc.flags & 0x20) {\n desc.ocrEsId = bytes[desc.size] << 8 | bytes[desc.size + 1];\n desc.size += 2;\n }\n\n desc.descriptors = parseDescriptors(bytes.subarray(desc.size)) || [];\n return desc;\n }\n}, {\n id: 0x04,\n parser: function parser(bytes) {\n // DecoderConfigDescriptor\n var desc = {\n tag: 0x04,\n oti: bytes[0],\n streamType: bytes[1],\n bufferSize: bytes[2] << 16 | bytes[3] << 8 | bytes[4],\n maxBitrate: bytes[5] << 24 | bytes[6] << 16 | bytes[7] << 8 | bytes[8],\n avgBitrate: bytes[9] << 24 | bytes[10] << 16 | bytes[11] << 8 | bytes[12],\n descriptors: parseDescriptors(bytes.subarray(13))\n };\n return desc;\n }\n}, {\n id: 0x05,\n parser: function parser(bytes) {\n // DecoderSpecificInfo\n return {\n tag: 0x05,\n bytes: bytes\n };\n }\n}, {\n id: 0x06,\n parser: function parser(bytes) {\n // SLConfigDescriptor\n return {\n tag: 0x06,\n bytes: bytes\n };\n }\n}];\n/**\n * find any number of boxes by name given a path to it in 
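// Note on setOpusHead above: Array.prototype has no `foreach`, so the
// channel-mapping write only runs as `forEach`. A corrected sketch of
// that branch:
//   if (config.channelMappingFamily > 0) {
//     view.setUint8(11, config.streamCount);
//     config.channelMapping.forEach((cm, i) => view.setUint8(12 + i, cm));
//   }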
an iso bmff\n * such as mp4.\n *\n * @param {TypedArray} bytes\n * bytes for the iso bmff to search for boxes in\n *\n * @param {Uint8Array[]|string[]|string|Uint8Array} name\n * An array of paths or a single path representing the name\n * of boxes to search through in bytes. Paths may be\n * uint8 (character codes) or strings.\n *\n * @param {boolean} [complete=false]\n * Should we search only for complete boxes on the final path.\n * This is very useful when you do not want to get back partial boxes\n * in the case of streaming files.\n *\n * @return {Uint8Array[]}\n * An array of the end paths that we found.\n */\n\nexport var findBox = function findBox(bytes, paths, complete) {\n if (complete === void 0) {\n complete = false;\n }\n\n paths = normalizePaths(paths);\n bytes = toUint8(bytes);\n var results = [];\n\n if (!paths.length) {\n // short-circuit the search for empty paths\n return results;\n }\n\n var i = 0;\n\n while (i < bytes.length) {\n var size = (bytes[i] << 24 | bytes[i + 1] << 16 | bytes[i + 2] << 8 | bytes[i + 3]) >>> 0;\n var type = bytes.subarray(i + 4, i + 8); // invalid box format.\n\n if (size === 0) {\n break;\n }\n\n var end = i + size;\n\n if (end > bytes.length) {\n // this box is bigger than the number of bytes we have\n // and complete is set, we cannot find any more boxes.\n if (complete) {\n break;\n }\n\n end = bytes.length;\n }\n\n var data = bytes.subarray(i + 8, end);\n\n if (bytesMatch(type, paths[0])) {\n if (paths.length === 1) {\n // this is the end of the path and we've found the box we were\n // looking for\n results.push(data);\n } else {\n // recursively search for the next box along the path\n results.push.apply(results, findBox(data, paths.slice(1), complete));\n }\n }\n\n i = end;\n } // we've finished searching all of bytes\n\n\n return results;\n};\n/**\n * Search for a single matching box by name in an iso bmff format like\n * mp4. This function is useful for finding codec boxes which\n * can be placed arbitrarily in sample descriptions depending\n * on the version of the file or file type.\n *\n * @param {TypedArray} bytes\n * bytes for the iso bmff to search for boxes in\n *\n * @param {string|Uint8Array} name\n * The name of the box to find.\n *\n * @return {Uint8Array[]}\n * a subarray of bytes representing the name boxed we found.\n */\n\nexport var findNamedBox = function findNamedBox(bytes, name) {\n name = normalizePath(name);\n\n if (!name.length) {\n // short-circuit the search for empty paths\n return bytes.subarray(bytes.length);\n }\n\n var i = 0;\n\n while (i < bytes.length) {\n if (bytesMatch(bytes.subarray(i, i + name.length), name)) {\n var size = (bytes[i - 4] << 24 | bytes[i - 3] << 16 | bytes[i - 2] << 8 | bytes[i - 1]) >>> 0;\n var end = size > 1 ? 
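// Usage sketch for findBox above: paths are matched level by level, so
// ['moov', 'trak'] yields the payload of every trak inside every moov.
// `mp4Bytes` here is a hypothetical full-file Uint8Array.
const trakPayloads = findBox(mp4Bytes, ['moov', 'trak'], true); // complete boxes only
const stblPayloads = trakPayloads.map(trak => findBox(trak, ['mdia', 'minf', 'stbl'])[0]);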
i + size : bytes.byteLength;\n return bytes.subarray(i + 4, end);\n }\n\n i++;\n } // we've finished searching all of bytes\n\n\n return bytes.subarray(bytes.length);\n};\n\nvar parseSamples = function parseSamples(data, entrySize, parseEntry) {\n if (entrySize === void 0) {\n entrySize = 4;\n }\n\n if (parseEntry === void 0) {\n parseEntry = function parseEntry(d) {\n return bytesToNumber(d);\n };\n }\n\n var entries = [];\n\n if (!data || !data.length) {\n return entries;\n }\n\n var entryCount = bytesToNumber(data.subarray(4, 8));\n\n for (var i = 8; entryCount; i += entrySize, entryCount--) {\n entries.push(parseEntry(data.subarray(i, i + entrySize)));\n }\n\n return entries;\n};\n\nexport var buildFrameTable = function buildFrameTable(stbl, timescale) {\n var keySamples = parseSamples(findBox(stbl, ['stss'])[0]);\n var chunkOffsets = parseSamples(findBox(stbl, ['stco'])[0]);\n var timeToSamples = parseSamples(findBox(stbl, ['stts'])[0], 8, function (entry) {\n return {\n sampleCount: bytesToNumber(entry.subarray(0, 4)),\n sampleDelta: bytesToNumber(entry.subarray(4, 8))\n };\n });\n var samplesToChunks = parseSamples(findBox(stbl, ['stsc'])[0], 12, function (entry) {\n return {\n firstChunk: bytesToNumber(entry.subarray(0, 4)),\n samplesPerChunk: bytesToNumber(entry.subarray(4, 8)),\n sampleDescriptionIndex: bytesToNumber(entry.subarray(8, 12))\n };\n });\n var stsz = findBox(stbl, ['stsz'])[0]; // stsz starts with a 4 byte sampleSize which we don't need\n\n var sampleSizes = parseSamples(stsz && stsz.length && stsz.subarray(4) || null);\n var frames = [];\n\n for (var chunkIndex = 0; chunkIndex < chunkOffsets.length; chunkIndex++) {\n var samplesInChunk = void 0;\n\n for (var i = 0; i < samplesToChunks.length; i++) {\n var sampleToChunk = samplesToChunks[i];\n var isThisOne = chunkIndex + 1 >= sampleToChunk.firstChunk && (i + 1 >= samplesToChunks.length || chunkIndex + 1 < samplesToChunks[i + 1].firstChunk);\n\n if (isThisOne) {\n samplesInChunk = sampleToChunk.samplesPerChunk;\n break;\n }\n }\n\n var chunkOffset = chunkOffsets[chunkIndex];\n\n for (var _i = 0; _i < samplesInChunk; _i++) {\n var frameEnd = sampleSizes[frames.length]; // if we don't have key samples every frame is a keyframe\n\n var keyframe = !keySamples.length;\n\n if (keySamples.length && keySamples.indexOf(frames.length + 1) !== -1) {\n keyframe = true;\n }\n\n var frame = {\n keyframe: keyframe,\n start: chunkOffset,\n end: chunkOffset + frameEnd\n };\n\n for (var k = 0; k < timeToSamples.length; k++) {\n var _timeToSamples$k = timeToSamples[k],\n sampleCount = _timeToSamples$k.sampleCount,\n sampleDelta = _timeToSamples$k.sampleDelta;\n\n if (frames.length <= sampleCount) {\n // ms to ns\n var lastTimestamp = frames.length ? 
frames[frames.length - 1].timestamp : 0;\n frame.timestamp = lastTimestamp + sampleDelta / timescale * 1000;\n frame.duration = sampleDelta;\n break;\n }\n }\n\n frames.push(frame);\n chunkOffset += frameEnd;\n }\n }\n\n return frames;\n};\nexport var addSampleDescription = function addSampleDescription(track, bytes) {\n var codec = bytesToString(bytes.subarray(0, 4));\n\n if (track.type === 'video') {\n track.info = track.info || {};\n track.info.width = bytes[28] << 8 | bytes[29];\n track.info.height = bytes[30] << 8 | bytes[31];\n } else if (track.type === 'audio') {\n track.info = track.info || {};\n track.info.channels = bytes[20] << 8 | bytes[21];\n track.info.bitDepth = bytes[22] << 8 | bytes[23];\n track.info.sampleRate = bytes[28] << 8 | bytes[29];\n }\n\n if (codec === 'avc1') {\n var avcC = findNamedBox(bytes, 'avcC'); // AVCDecoderConfigurationRecord\n\n codec += \".\" + getAvcCodec(avcC);\n track.info.avcC = avcC; // TODO: do we need to parse all this?\n\n /* {\n configurationVersion: avcC[0],\n profile: avcC[1],\n profileCompatibility: avcC[2],\n level: avcC[3],\n lengthSizeMinusOne: avcC[4] & 0x3\n };\n let spsNalUnitCount = avcC[5] & 0x1F;\n const spsNalUnits = track.info.avc.spsNalUnits = [];\n // past spsNalUnitCount\n let offset = 6;\n while (spsNalUnitCount--) {\n const nalLen = avcC[offset] << 8 | avcC[offset + 1];\n spsNalUnits.push(avcC.subarray(offset + 2, offset + 2 + nalLen));\n offset += nalLen + 2;\n }\n let ppsNalUnitCount = avcC[offset];\n const ppsNalUnits = track.info.avc.ppsNalUnits = [];\n // past ppsNalUnitCount\n offset += 1;\n while (ppsNalUnitCount--) {\n const nalLen = avcC[offset] << 8 | avcC[offset + 1];\n ppsNalUnits.push(avcC.subarray(offset + 2, offset + 2 + nalLen));\n offset += nalLen + 2;\n }*/\n // HEVCDecoderConfigurationRecord\n } else if (codec === 'hvc1' || codec === 'hev1') {\n codec += \".\" + getHvcCodec(findNamedBox(bytes, 'hvcC'));\n } else if (codec === 'mp4a' || codec === 'mp4v') {\n var esds = findNamedBox(bytes, 'esds');\n var esDescriptor = parseDescriptors(esds.subarray(4))[0];\n var decoderConfig = esDescriptor && esDescriptor.descriptors.filter(function (_ref) {\n var tag = _ref.tag;\n return tag === 0x04;\n })[0];\n\n if (decoderConfig) {\n // most codecs do not have a further '.'\n // such as 0xa5 for ac-3 and 0xa6 for e-ac-3\n codec += '.' + toHexString(decoderConfig.oti);\n\n if (decoderConfig.oti === 0x40) {\n codec += '.' + (decoderConfig.descriptors[0].bytes[0] >> 3).toString();\n } else if (decoderConfig.oti === 0x20) {\n codec += '.' 
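// Codec-string sketch for the esds branch above: AAC-LC has oti 0x40 and
// the first DecoderSpecificInfo byte carries the audioObjectType in its
// top 5 bits, producing the familiar 'mp4a.40.2'.
const otiSketch = 0x40;
const firstDsiByte = 0x12; // 00010|010 -> audioObjectType 2
const aacCodec = 'mp4a.' + otiSketch.toString(16) + '.' + (firstDsiByte >> 3); // 'mp4a.40.2'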
+ decoderConfig.descriptors[0].bytes[4].toString();\n } else if (decoderConfig.oti === 0xdd) {\n codec = 'vorbis';\n }\n } else if (track.type === 'audio') {\n codec += '.40.2';\n } else {\n codec += '.20.9';\n }\n } else if (codec === 'av01') {\n // AV1DecoderConfigurationRecord\n codec += \".\" + getAv1Codec(findNamedBox(bytes, 'av1C'));\n } else if (codec === 'vp09') {\n // VPCodecConfigurationRecord\n var vpcC = findNamedBox(bytes, 'vpcC'); // https://www.webmproject.org/vp9/mp4/\n\n var profile = vpcC[0];\n var level = vpcC[1];\n var bitDepth = vpcC[2] >> 4;\n var chromaSubsampling = (vpcC[2] & 0x0F) >> 1;\n var videoFullRangeFlag = (vpcC[2] & 0x0F) >> 3;\n var colourPrimaries = vpcC[3];\n var transferCharacteristics = vpcC[4];\n var matrixCoefficients = vpcC[5];\n codec += \".\" + padStart(profile, 2, '0');\n codec += \".\" + padStart(level, 2, '0');\n codec += \".\" + padStart(bitDepth, 2, '0');\n codec += \".\" + padStart(chromaSubsampling, 2, '0');\n codec += \".\" + padStart(colourPrimaries, 2, '0');\n codec += \".\" + padStart(transferCharacteristics, 2, '0');\n codec += \".\" + padStart(matrixCoefficients, 2, '0');\n codec += \".\" + padStart(videoFullRangeFlag, 2, '0');\n } else if (codec === 'theo') {\n codec = 'theora';\n } else if (codec === 'spex') {\n codec = 'speex';\n } else if (codec === '.mp3') {\n codec = 'mp4a.40.34';\n } else if (codec === 'msVo') {\n codec = 'vorbis';\n } else if (codec === 'Opus') {\n codec = 'opus';\n var dOps = findNamedBox(bytes, 'dOps');\n track.info.opus = parseOpusHead(dOps); // TODO: should this go into the webm code??\n // Firefox requires a codecDelay for opus playback\n // see https://bugzilla.mozilla.org/show_bug.cgi?id=1276238\n\n track.info.codecDelay = 6500000;\n } else {\n codec = codec.toLowerCase();\n }\n /* eslint-enable */\n // flac, ac-3, ec-3, opus\n\n\n track.codec = codec;\n};\nexport var parseTracks = function parseTracks(bytes, frameTable) {\n if (frameTable === void 0) {\n frameTable = true;\n }\n\n bytes = toUint8(bytes);\n var traks = findBox(bytes, ['moov', 'trak'], true);\n var tracks = [];\n traks.forEach(function (trak) {\n var track = {\n bytes: trak\n };\n var mdia = findBox(trak, ['mdia'])[0];\n var hdlr = findBox(mdia, ['hdlr'])[0];\n var trakType = bytesToString(hdlr.subarray(8, 12));\n\n if (trakType === 'soun') {\n track.type = 'audio';\n } else if (trakType === 'vide') {\n track.type = 'video';\n } else {\n track.type = trakType;\n }\n\n var tkhd = findBox(trak, ['tkhd'])[0];\n\n if (tkhd) {\n var view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n var tkhdVersion = view.getUint8(0);\n track.number = tkhdVersion === 0 ? view.getUint32(12) : view.getUint32(20);\n }\n\n var mdhd = findBox(mdia, ['mdhd'])[0];\n\n if (mdhd) {\n // mdhd is a FullBox, meaning it will have its own version as the first byte\n var version = mdhd[0];\n var index = version === 0 ? 
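// Worked vp09 string for the VPCodecConfigurationRecord branch above, with
// illustrative vpcC bytes [0, 10, 0xA2, 1, 1, 1]:
//   bitDepth           = 0xA2 >> 4          = 10
//   chromaSubsampling  = (0xA2 & 0x0F) >> 1 = 1
//   videoFullRangeFlag = (0xA2 & 0x0F) >> 3 = 0
// -> 'vp09.00.10.10.01.01.01.01.00'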
12 : 20;\n track.timescale = (mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]) >>> 0;\n }\n\n var stbl = findBox(mdia, ['minf', 'stbl'])[0];\n var stsd = findBox(stbl, ['stsd'])[0];\n var descriptionCount = bytesToNumber(stsd.subarray(4, 8));\n var offset = 8; // add codec and codec info\n\n while (descriptionCount--) {\n var len = bytesToNumber(stsd.subarray(offset, offset + 4));\n var sampleDescriptor = stsd.subarray(offset + 4, offset + 4 + len);\n addSampleDescription(track, sampleDescriptor);\n offset += 4 + len;\n }\n\n if (frameTable) {\n track.frameTable = buildFrameTable(stbl, track.timescale);\n } // codec has no sub parameters\n\n\n tracks.push(track);\n });\n return tracks;\n};\nexport var parseMediaInfo = function parseMediaInfo(bytes) {\n var mvhd = findBox(bytes, ['moov', 'mvhd'], true)[0];\n\n if (!mvhd || !mvhd.length) {\n return;\n }\n\n var info = {}; // ms to ns\n // mvhd v1 has 8 byte duration and other fields too\n\n if (mvhd[0] === 1) {\n info.timestampScale = bytesToNumber(mvhd.subarray(20, 24));\n info.duration = bytesToNumber(mvhd.subarray(24, 32));\n } else {\n info.timestampScale = bytesToNumber(mvhd.subarray(12, 16));\n info.duration = bytesToNumber(mvhd.subarray(16, 20));\n }\n\n info.bytes = mvhd;\n return info;\n};", "import { toUint8, bytesToNumber, bytesMatch, bytesToString, numberToBytes, padStart } from './byte-helpers';\nimport { getAvcCodec, getHvcCodec, getAv1Codec } from './codec-helpers.js'; // relevant specs for this parser:\n// https://matroska-org.github.io/libebml/specs.html\n// https://www.matroska.org/technical/elements.html\n// https://www.webmproject.org/docs/container/\n\nexport var EBML_TAGS = {\n EBML: toUint8([0x1A, 0x45, 0xDF, 0xA3]),\n DocType: toUint8([0x42, 0x82]),\n Segment: toUint8([0x18, 0x53, 0x80, 0x67]),\n SegmentInfo: toUint8([0x15, 0x49, 0xA9, 0x66]),\n Tracks: toUint8([0x16, 0x54, 0xAE, 0x6B]),\n Track: toUint8([0xAE]),\n TrackNumber: toUint8([0xd7]),\n DefaultDuration: toUint8([0x23, 0xe3, 0x83]),\n TrackEntry: toUint8([0xAE]),\n TrackType: toUint8([0x83]),\n FlagDefault: toUint8([0x88]),\n CodecID: toUint8([0x86]),\n CodecPrivate: toUint8([0x63, 0xA2]),\n VideoTrack: toUint8([0xe0]),\n AudioTrack: toUint8([0xe1]),\n // Not used yet, but will be used for live webm/mkv\n // see https://www.matroska.org/technical/basics.html#block-structure\n // see https://www.matroska.org/technical/basics.html#simpleblock-structure\n Cluster: toUint8([0x1F, 0x43, 0xB6, 0x75]),\n Timestamp: toUint8([0xE7]),\n TimestampScale: toUint8([0x2A, 0xD7, 0xB1]),\n BlockGroup: toUint8([0xA0]),\n BlockDuration: toUint8([0x9B]),\n Block: toUint8([0xA1]),\n SimpleBlock: toUint8([0xA3])\n};\n/**\n * This is a simple table to determine the length\n * of things in ebml. The length is one based (starts at 1,\n * rather than zero) and for every zero bit before a one bit\n * we add one to length. We also need this table because in some\n * case we have to xor all the length bits from another value.\n */\n\nvar LENGTH_TABLE = [128, 64, 32, 16, 8, 4, 2, 1];\n\nvar getLength = function getLength(byte) {\n var len = 1;\n\n for (var i = 0; i < LENGTH_TABLE.length; i++) {\n if (byte & LENGTH_TABLE[i]) {\n break;\n }\n\n len++;\n }\n\n return len;\n}; // length in ebml is stored in the first 4 to 8 bits\n// of the first byte. 4 for the id length and 8 for the\n// data size length. 
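// The vint length rule implemented by getLength above: one plus the number
// of leading zero bits in the first byte.
getLength(0x81); // 1 (1000 0001)
getLength(0x40); // 2 (0100 0000)
getLength(0x1a); // 4 (0001 1010) -- first byte of the EBML header id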
Length is measured by converting the number to binary\n// then 1 + the number of zeros before a 1 is encountered starting\n// from the left.\n\n\nvar getvint = function getvint(bytes, offset, removeLength, signed) {\n if (removeLength === void 0) {\n removeLength = true;\n }\n\n if (signed === void 0) {\n signed = false;\n }\n\n var length = getLength(bytes[offset]);\n var valueBytes = bytes.subarray(offset, offset + length); // NOTE that we do **not** subarray here because we need to copy these bytes\n // as they will be modified below to remove the dataSizeLen bits and we do not\n // want to modify the original data. normally we could just call slice on\n // uint8array but ie 11 does not support that...\n\n if (removeLength) {\n valueBytes = Array.prototype.slice.call(bytes, offset, offset + length);\n valueBytes[0] ^= LENGTH_TABLE[length - 1];\n }\n\n return {\n length: length,\n value: bytesToNumber(valueBytes, {\n signed: signed\n }),\n bytes: valueBytes\n };\n};\n\nvar normalizePath = function normalizePath(path) {\n if (typeof path === 'string') {\n return path.match(/.{1,2}/g).map(function (p) {\n return normalizePath(p);\n });\n }\n\n if (typeof path === 'number') {\n return numberToBytes(path);\n }\n\n return path;\n};\n\nvar normalizePaths = function normalizePaths(paths) {\n if (!Array.isArray(paths)) {\n return [normalizePath(paths)];\n }\n\n return paths.map(function (p) {\n return normalizePath(p);\n });\n};\n\nvar getInfinityDataSize = function getInfinityDataSize(id, bytes, offset) {\n if (offset >= bytes.length) {\n return bytes.length;\n }\n\n var innerid = getvint(bytes, offset, false);\n\n if (bytesMatch(id.bytes, innerid.bytes)) {\n return offset;\n }\n\n var dataHeader = getvint(bytes, offset + innerid.length);\n return getInfinityDataSize(id, bytes, offset + dataHeader.length + dataHeader.value + innerid.length);\n};\n/**\n * Notes on the EBLM format.\n *\n * EBLM uses \"vints\" tags. Every vint tag contains\n * two parts\n *\n * 1. The length from the first byte. You get this by\n * converting the byte to binary and counting the zeros\n * before a 1. Then you add 1 to that. Examples\n * 00011111 = length 4 because there are 3 zeros before a 1.\n * 00100000 = length 3 because there are 2 zeros before a 1.\n * 00000011 = length 7 because there are 6 zeros before a 1.\n *\n * 2. The bits used for length are removed from the first byte\n * Then all the bytes are merged into a value. NOTE: this\n * is not the case for id ebml tags as there id includes\n * length bits.\n *\n */\n\n\nexport var findEbml = function findEbml(bytes, paths) {\n paths = normalizePaths(paths);\n bytes = toUint8(bytes);\n var results = [];\n\n if (!paths.length) {\n return results;\n }\n\n var i = 0;\n\n while (i < bytes.length) {\n var id = getvint(bytes, i, false);\n var dataHeader = getvint(bytes, i + id.length);\n var dataStart = i + id.length + dataHeader.length; // dataSize is unknown or this is a live stream\n\n if (dataHeader.value === 0x7f) {\n dataHeader.value = getInfinityDataSize(id, bytes, dataStart);\n\n if (dataHeader.value !== bytes.length) {\n dataHeader.value -= dataStart;\n }\n }\n\n var dataEnd = dataStart + dataHeader.value > bytes.length ? 
bytes.length : dataStart + dataHeader.value;\n var data = bytes.subarray(dataStart, dataEnd);\n\n if (bytesMatch(paths[0], id.bytes)) {\n if (paths.length === 1) {\n // this is the end of the paths and we've found the tag we were\n // looking for\n results.push(data);\n } else {\n // recursively search for the next tag inside of the data\n // of this one\n results = results.concat(findEbml(data, paths.slice(1)));\n }\n }\n\n var totalLength = id.length + dataHeader.length + data.length; // move past this tag entirely, we are not looking for it\n\n i += totalLength;\n }\n\n return results;\n}; // see https://www.matroska.org/technical/basics.html#block-structure\n\nexport var decodeBlock = function decodeBlock(block, type, timestampScale, clusterTimestamp) {\n var duration;\n\n if (type === 'group') {\n duration = findEbml(block, [EBML_TAGS.BlockDuration])[0];\n\n if (duration) {\n duration = bytesToNumber(duration);\n duration = 1 / timestampScale * duration * timestampScale / 1000;\n }\n\n block = findEbml(block, [EBML_TAGS.Block])[0];\n type = 'block'; // treat data as a block after this point\n }\n\n var dv = new DataView(block.buffer, block.byteOffset, block.byteLength);\n var trackNumber = getvint(block, 0);\n var timestamp = dv.getInt16(trackNumber.length, false);\n var flags = block[trackNumber.length + 2];\n var data = block.subarray(trackNumber.length + 3); // pts/dts in seconds\n\n var ptsdts = 1 / timestampScale * (clusterTimestamp + timestamp) * timestampScale / 1000; // return the frame\n\n var parsed = {\n duration: duration,\n trackNumber: trackNumber.value,\n keyframe: type === 'simple' && flags >> 7 === 1,\n invisible: (flags & 0x08) >> 3 === 1,\n lacing: (flags & 0x06) >> 1,\n discardable: type === 'simple' && (flags & 0x01) === 1,\n frames: [],\n pts: ptsdts,\n dts: ptsdts,\n timestamp: timestamp\n };\n\n if (!parsed.lacing) {\n parsed.frames.push(data);\n return parsed;\n }\n\n var numberOfFrames = data[0] + 1;\n var frameSizes = [];\n var offset = 1; // Fixed\n\n if (parsed.lacing === 2) {\n var sizeOfFrame = (data.length - offset) / numberOfFrames;\n\n for (var i = 0; i < numberOfFrames; i++) {\n frameSizes.push(sizeOfFrame);\n }\n } // xiph\n\n\n if (parsed.lacing === 1) {\n for (var _i = 0; _i < numberOfFrames - 1; _i++) {\n var size = 0;\n\n do {\n size += data[offset];\n offset++;\n } while (data[offset - 1] === 0xFF);\n\n frameSizes.push(size);\n }\n } // ebml\n\n\n if (parsed.lacing === 3) {\n // first vint is unsinged\n // after that vints are singed and\n // based on a compounding size\n var _size = 0;\n\n for (var _i2 = 0; _i2 < numberOfFrames - 1; _i2++) {\n var vint = _i2 === 0 ? 
getvint(data, offset) : getvint(data, offset, true, true);\n _size += vint.value;\n frameSizes.push(_size);\n offset += vint.length;\n }\n }\n\n frameSizes.forEach(function (size) {\n parsed.frames.push(data.subarray(offset, offset + size));\n offset += size;\n });\n return parsed;\n}; // VP9 Codec Feature Metadata (CodecPrivate)\n// https://www.webmproject.org/docs/container/\n\nvar parseVp9Private = function parseVp9Private(bytes) {\n var i = 0;\n var params = {};\n\n while (i < bytes.length) {\n var id = bytes[i] & 0x7f;\n var len = bytes[i + 1];\n var val = void 0;\n\n if (len === 1) {\n val = bytes[i + 2];\n } else {\n val = bytes.subarray(i + 2, i + 2 + len);\n }\n\n if (id === 1) {\n params.profile = val;\n } else if (id === 2) {\n params.level = val;\n } else if (id === 3) {\n params.bitDepth = val;\n } else if (id === 4) {\n params.chromaSubsampling = val;\n } else {\n params[id] = val;\n }\n\n i += 2 + len;\n }\n\n return params;\n};\n\nexport var parseTracks = function parseTracks(bytes) {\n bytes = toUint8(bytes);\n var decodedTracks = [];\n var tracks = findEbml(bytes, [EBML_TAGS.Segment, EBML_TAGS.Tracks, EBML_TAGS.Track]);\n\n if (!tracks.length) {\n tracks = findEbml(bytes, [EBML_TAGS.Tracks, EBML_TAGS.Track]);\n }\n\n if (!tracks.length) {\n tracks = findEbml(bytes, [EBML_TAGS.Track]);\n }\n\n if (!tracks.length) {\n return decodedTracks;\n }\n\n tracks.forEach(function (track) {\n var trackType = findEbml(track, EBML_TAGS.TrackType)[0];\n\n if (!trackType || !trackType.length) {\n return;\n } // 1 is video, 2 is audio, 17 is subtitle\n // other values are unimportant in this context\n\n\n if (trackType[0] === 1) {\n trackType = 'video';\n } else if (trackType[0] === 2) {\n trackType = 'audio';\n } else if (trackType[0] === 17) {\n trackType = 'subtitle';\n } else {\n return;\n } // todo parse language\n\n\n var decodedTrack = {\n rawCodec: bytesToString(findEbml(track, [EBML_TAGS.CodecID])[0]),\n type: trackType,\n codecPrivate: findEbml(track, [EBML_TAGS.CodecPrivate])[0],\n number: bytesToNumber(findEbml(track, [EBML_TAGS.TrackNumber])[0]),\n defaultDuration: bytesToNumber(findEbml(track, [EBML_TAGS.DefaultDuration])[0]),\n default: findEbml(track, [EBML_TAGS.FlagDefault])[0],\n rawData: track\n };\n var codec = '';\n\n if (/V_MPEG4\\/ISO\\/AVC/.test(decodedTrack.rawCodec)) {\n codec = \"avc1.\" + getAvcCodec(decodedTrack.codecPrivate);\n } else if (/V_MPEGH\\/ISO\\/HEVC/.test(decodedTrack.rawCodec)) {\n codec = \"hev1.\" + getHvcCodec(decodedTrack.codecPrivate);\n } else if (/V_MPEG4\\/ISO\\/ASP/.test(decodedTrack.rawCodec)) {\n if (decodedTrack.codecPrivate) {\n codec = 'mp4v.20.' 
+ decodedTrack.codecPrivate[4].toString();\n } else {\n codec = 'mp4v.20.9';\n }\n } else if (/^V_THEORA/.test(decodedTrack.rawCodec)) {\n codec = 'theora';\n } else if (/^V_VP8/.test(decodedTrack.rawCodec)) {\n codec = 'vp8';\n } else if (/^V_VP9/.test(decodedTrack.rawCodec)) {\n if (decodedTrack.codecPrivate) {\n var _parseVp9Private = parseVp9Private(decodedTrack.codecPrivate),\n profile = _parseVp9Private.profile,\n level = _parseVp9Private.level,\n bitDepth = _parseVp9Private.bitDepth,\n chromaSubsampling = _parseVp9Private.chromaSubsampling;\n\n codec = 'vp09.';\n codec += padStart(profile, 2, '0') + \".\";\n codec += padStart(level, 2, '0') + \".\";\n codec += padStart(bitDepth, 2, '0') + \".\";\n codec += \"\" + padStart(chromaSubsampling, 2, '0'); // Video -> Colour -> Ebml name\n\n var matrixCoefficients = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xB1]])[0] || [];\n var videoFullRangeFlag = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xB9]])[0] || [];\n var transferCharacteristics = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xBA]])[0] || [];\n var colourPrimaries = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xBB]])[0] || []; // if we find any optional codec parameter specify them all.\n\n if (matrixCoefficients.length || videoFullRangeFlag.length || transferCharacteristics.length || colourPrimaries.length) {\n codec += \".\" + padStart(colourPrimaries[0], 2, '0');\n codec += \".\" + padStart(transferCharacteristics[0], 2, '0');\n codec += \".\" + padStart(matrixCoefficients[0], 2, '0');\n codec += \".\" + padStart(videoFullRangeFlag[0], 2, '0');\n }\n } else {\n codec = 'vp9';\n }\n } else if (/^V_AV1/.test(decodedTrack.rawCodec)) {\n codec = \"av01.\" + getAv1Codec(decodedTrack.codecPrivate);\n } else if (/A_ALAC/.test(decodedTrack.rawCodec)) {\n codec = 'alac';\n } else if (/A_MPEG\\/L2/.test(decodedTrack.rawCodec)) {\n codec = 'mp2';\n } else if (/A_MPEG\\/L3/.test(decodedTrack.rawCodec)) {\n codec = 'mp3';\n } else if (/^A_AAC/.test(decodedTrack.rawCodec)) {\n if (decodedTrack.codecPrivate) {\n codec = 'mp4a.40.' 
+ (decodedTrack.codecPrivate[0] >>> 3).toString();\n } else {\n codec = 'mp4a.40.2';\n }\n } else if (/^A_AC3/.test(decodedTrack.rawCodec)) {\n codec = 'ac-3';\n } else if (/^A_PCM/.test(decodedTrack.rawCodec)) {\n codec = 'pcm';\n } else if (/^A_MS\\/ACM/.test(decodedTrack.rawCodec)) {\n codec = 'speex';\n } else if (/^A_EAC3/.test(decodedTrack.rawCodec)) {\n codec = 'ec-3';\n } else if (/^A_VORBIS/.test(decodedTrack.rawCodec)) {\n codec = 'vorbis';\n } else if (/^A_FLAC/.test(decodedTrack.rawCodec)) {\n codec = 'flac';\n } else if (/^A_OPUS/.test(decodedTrack.rawCodec)) {\n codec = 'opus';\n }\n\n decodedTrack.codec = codec;\n decodedTracks.push(decodedTrack);\n });\n return decodedTracks.sort(function (a, b) {\n return a.number - b.number;\n });\n};\nexport var parseData = function parseData(data, tracks) {\n var allBlocks = [];\n var segment = findEbml(data, [EBML_TAGS.Segment])[0];\n var timestampScale = findEbml(segment, [EBML_TAGS.SegmentInfo, EBML_TAGS.TimestampScale])[0]; // in nanoseconds, defaults to 1ms\n\n if (timestampScale && timestampScale.length) {\n timestampScale = bytesToNumber(timestampScale);\n } else {\n timestampScale = 1000000;\n }\n\n var clusters = findEbml(segment, [EBML_TAGS.Cluster]);\n\n if (!tracks) {\n tracks = parseTracks(segment);\n }\n\n clusters.forEach(function (cluster, ci) {\n var simpleBlocks = findEbml(cluster, [EBML_TAGS.SimpleBlock]).map(function (b) {\n return {\n type: 'simple',\n data: b\n };\n });\n var blockGroups = findEbml(cluster, [EBML_TAGS.BlockGroup]).map(function (b) {\n return {\n type: 'group',\n data: b\n };\n });\n var timestamp = findEbml(cluster, [EBML_TAGS.Timestamp])[0] || 0;\n\n if (timestamp && timestamp.length) {\n timestamp = bytesToNumber(timestamp);\n } // get all blocks then sort them into the correct order\n\n\n var blocks = simpleBlocks.concat(blockGroups).sort(function (a, b) {\n return a.data.byteOffset - b.data.byteOffset;\n });\n blocks.forEach(function (block, bi) {\n var decoded = decodeBlock(block.data, block.type, timestampScale, timestamp);\n allBlocks.push(decoded);\n });\n });\n return {\n tracks: tracks,\n blocks: allBlocks\n };\n};", "import { bytesMatch, toUint8 } from './byte-helpers.js';\nexport var NAL_TYPE_ONE = toUint8([0x00, 0x00, 0x00, 0x01]);\nexport var NAL_TYPE_TWO = toUint8([0x00, 0x00, 0x01]);\nexport var EMULATION_PREVENTION = toUint8([0x00, 0x00, 0x03]);\n/**\n * Expunge any \"Emulation Prevention\" bytes from a \"Raw Byte\n * Sequence Payload\"\n *\n * @param data {Uint8Array} the bytes of a RBSP from a NAL\n * unit\n * @return {Uint8Array} the RBSP without any Emulation\n * Prevention Bytes\n */\n\nexport var discardEmulationPreventionBytes = function discardEmulationPreventionBytes(bytes) {\n var positions = [];\n var i = 1; // Find all `Emulation Prevention Bytes`\n\n while (i < bytes.length - 2) {\n if (bytesMatch(bytes.subarray(i, i + 3), EMULATION_PREVENTION)) {\n positions.push(i + 2);\n i++;\n }\n\n i++;\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n\n if (positions.length === 0) {\n return bytes;\n } // Create a new array to hold the NAL unit data\n\n\n var newLength = bytes.length - positions.length;\n var newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === positions[0]) {\n // Skip this byte\n sourceIndex++; // Remove this position index\n\n positions.shift();\n }\n\n newData[i] = bytes[sourceIndex];\n }\n\n return newData;\n};\nexport var findNal = 
function findNal(bytes, dataType, types, nalLimit) {\n if (nalLimit === void 0) {\n nalLimit = Infinity;\n }\n\n bytes = toUint8(bytes);\n types = [].concat(types);\n var i = 0;\n var nalStart;\n var nalsFound = 0; // keep searching until:\n // we reach the end of bytes\n // we reach the maximum number of nals they want to seach\n // NOTE: that we disregard nalLimit when we have found the start\n // of the nal we want so that we can find the end of the nal we want.\n\n while (i < bytes.length && (nalsFound < nalLimit || nalStart)) {\n var nalOffset = void 0;\n\n if (bytesMatch(bytes.subarray(i), NAL_TYPE_ONE)) {\n nalOffset = 4;\n } else if (bytesMatch(bytes.subarray(i), NAL_TYPE_TWO)) {\n nalOffset = 3;\n } // we are unsynced,\n // find the next nal unit\n\n\n if (!nalOffset) {\n i++;\n continue;\n }\n\n nalsFound++;\n\n if (nalStart) {\n return discardEmulationPreventionBytes(bytes.subarray(nalStart, i));\n }\n\n var nalType = void 0;\n\n if (dataType === 'h264') {\n nalType = bytes[i + nalOffset] & 0x1f;\n } else if (dataType === 'h265') {\n nalType = bytes[i + nalOffset] >> 1 & 0x3f;\n }\n\n if (types.indexOf(nalType) !== -1) {\n nalStart = i + nalOffset;\n } // nal header is 1 length for h264, and 2 for h265\n\n\n i += nalOffset + (dataType === 'h264' ? 1 : 2);\n }\n\n return bytes.subarray(0, 0);\n};\nexport var findH264Nal = function findH264Nal(bytes, type, nalLimit) {\n return findNal(bytes, 'h264', type, nalLimit);\n};\nexport var findH265Nal = function findH265Nal(bytes, type, nalLimit) {\n return findNal(bytes, 'h265', type, nalLimit);\n};", "import { toUint8, bytesMatch } from './byte-helpers.js';\nimport { findBox } from './mp4-helpers.js';\nimport { findEbml, EBML_TAGS } from './ebml-helpers.js';\nimport { getId3Offset } from './id3-helpers.js';\nimport { findH264Nal, findH265Nal } from './nal-helpers.js';\nvar CONSTANTS = {\n // \"webm\" string literal in hex\n 'webm': toUint8([0x77, 0x65, 0x62, 0x6d]),\n // \"matroska\" string literal in hex\n 'matroska': toUint8([0x6d, 0x61, 0x74, 0x72, 0x6f, 0x73, 0x6b, 0x61]),\n // \"fLaC\" string literal in hex\n 'flac': toUint8([0x66, 0x4c, 0x61, 0x43]),\n // \"OggS\" string literal in hex\n 'ogg': toUint8([0x4f, 0x67, 0x67, 0x53]),\n // ac-3 sync byte, also works for ec-3 as that is simply a codec\n // of ac-3\n 'ac3': toUint8([0x0b, 0x77]),\n // \"RIFF\" string literal in hex used for wav and avi\n 'riff': toUint8([0x52, 0x49, 0x46, 0x46]),\n // \"AVI\" string literal in hex\n 'avi': toUint8([0x41, 0x56, 0x49]),\n // \"WAVE\" string literal in hex\n 'wav': toUint8([0x57, 0x41, 0x56, 0x45]),\n // \"ftyp3g\" string literal in hex\n '3gp': toUint8([0x66, 0x74, 0x79, 0x70, 0x33, 0x67]),\n // \"ftyp\" string literal in hex\n 'mp4': toUint8([0x66, 0x74, 0x79, 0x70]),\n // \"styp\" string literal in hex\n 'fmp4': toUint8([0x73, 0x74, 0x79, 0x70]),\n // \"ftypqt\" string literal in hex\n 'mov': toUint8([0x66, 0x74, 0x79, 0x70, 0x71, 0x74]),\n // moov string literal in hex\n 'moov': toUint8([0x6D, 0x6F, 0x6F, 0x76]),\n // moof string literal in hex\n 'moof': toUint8([0x6D, 0x6F, 0x6F, 0x66])\n};\nvar _isLikely = {\n aac: function aac(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, [0xFF, 0x10], {\n offset: offset,\n mask: [0xFF, 0x16]\n });\n },\n mp3: function mp3(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, [0xFF, 0x02], {\n offset: offset,\n mask: [0xFF, 0x06]\n });\n },\n webm: function webm(bytes) {\n var docType = findEbml(bytes, [EBML_TAGS.EBML, EBML_TAGS.DocType])[0]; // check if 
DocType EBML tag is webm\n\n return bytesMatch(docType, CONSTANTS.webm);\n },\n mkv: function mkv(bytes) {\n var docType = findEbml(bytes, [EBML_TAGS.EBML, EBML_TAGS.DocType])[0]; // check if DocType EBML tag is matroska\n\n return bytesMatch(docType, CONSTANTS.matroska);\n },\n mp4: function mp4(bytes) {\n // if this file is another base media file format, it is not mp4\n if (_isLikely['3gp'](bytes) || _isLikely.mov(bytes)) {\n return false;\n } // if this file starts with a ftyp or styp box its mp4\n\n\n if (bytesMatch(bytes, CONSTANTS.mp4, {\n offset: 4\n }) || bytesMatch(bytes, CONSTANTS.fmp4, {\n offset: 4\n })) {\n return true;\n } // if this file starts with a moof/moov box its mp4\n\n\n if (bytesMatch(bytes, CONSTANTS.moof, {\n offset: 4\n }) || bytesMatch(bytes, CONSTANTS.moov, {\n offset: 4\n })) {\n return true;\n }\n },\n mov: function mov(bytes) {\n return bytesMatch(bytes, CONSTANTS.mov, {\n offset: 4\n });\n },\n '3gp': function gp(bytes) {\n return bytesMatch(bytes, CONSTANTS['3gp'], {\n offset: 4\n });\n },\n ac3: function ac3(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, CONSTANTS.ac3, {\n offset: offset\n });\n },\n ts: function ts(bytes) {\n if (bytes.length < 189 && bytes.length >= 1) {\n return bytes[0] === 0x47;\n }\n\n var i = 0; // check the first 376 bytes for two matching sync bytes\n\n while (i + 188 < bytes.length && i < 188) {\n if (bytes[i] === 0x47 && bytes[i + 188] === 0x47) {\n return true;\n }\n\n i += 1;\n }\n\n return false;\n },\n flac: function flac(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, CONSTANTS.flac, {\n offset: offset\n });\n },\n ogg: function ogg(bytes) {\n return bytesMatch(bytes, CONSTANTS.ogg);\n },\n avi: function avi(bytes) {\n return bytesMatch(bytes, CONSTANTS.riff) && bytesMatch(bytes, CONSTANTS.avi, {\n offset: 8\n });\n },\n wav: function wav(bytes) {\n return bytesMatch(bytes, CONSTANTS.riff) && bytesMatch(bytes, CONSTANTS.wav, {\n offset: 8\n });\n },\n 'h264': function h264(bytes) {\n // find seq_parameter_set_rbsp\n return findH264Nal(bytes, 7, 3).length;\n },\n 'h265': function h265(bytes) {\n // find video_parameter_set_rbsp or seq_parameter_set_rbsp\n return findH265Nal(bytes, [32, 33], 3).length;\n }\n}; // get all the isLikely functions\n// but make sure 'ts' is above h264 and h265\n// but below everything else as it is the least specific\n\nvar isLikelyTypes = Object.keys(_isLikely) // remove ts, h264, h265\n.filter(function (t) {\n return t !== 'ts' && t !== 'h264' && t !== 'h265';\n}) // add it back to the bottom\n.concat(['ts', 'h264', 'h265']); // make sure we are dealing with uint8 data.\n\nisLikelyTypes.forEach(function (type) {\n var isLikelyFn = _isLikely[type];\n\n _isLikely[type] = function (bytes) {\n return isLikelyFn(toUint8(bytes));\n };\n}); // export after wrapping\n\nexport var isLikely = _isLikely; // A useful list of file signatures can be found here\n// https://en.wikipedia.org/wiki/List_of_file_signatures\n\nexport var detectContainerForBytes = function detectContainerForBytes(bytes) {\n bytes = toUint8(bytes);\n\n for (var i = 0; i < isLikelyTypes.length; i++) {\n var type = isLikelyTypes[i];\n\n if (isLikely[type](bytes)) {\n return type;\n }\n }\n\n return '';\n}; // fmp4 is not a container\n\nexport var isLikelyFmp4MediaSegment = function isLikelyFmp4MediaSegment(bytes) {\n return findBox(bytes, ['moof']).length > 0;\n};", "import { Controller } from \"@hotwired/stimulus\"\nimport videojs from \"video.js\"\n\nexport default class extends 
Controller {\n static get targets() {\n return [\"player\", \"saveProgressForm\", \"submitButton\"];\n }\n\n static get values() {\n return { saveProgress: Boolean, lang: String, autoplay: { type: Boolean, default: false } };\n }\n\n connect() {\n this.player = videojs(this.playerTarget, { autoplay: this.autoplayValue });\n this.startedAt = undefined;\n this.playerTarget.querySelector('.vjs-big-play-button').focus();\n }\n\n disconnect() {\n this.player.dispose();\n }\n\n focusPlay() {\n this.playerTarget.querySelector('.vjs-play-control').focus();\n }\n\n enableTextTrack() {\n const track = this.player && this.player.textTracks().tracks_\n .find(track => track.kind === 'subtitles' && track.language === this.langValue);\n if (track) {\n track.mode = 'showing';\n this.enabledTextTrack = true;\n }\n }\n\n play() {\n this.startedAt = new Date().toISOString();\n if (!this.enabledTextTrack) {\n this.enableTextTrack();\n }\n }\n\n sendStats() {\n if (this.saveProgressValue && this.startedAt) {\n const form = this.saveProgressFormTarget;\n form._user_session_started.value = this.startedAt;\n form._user_session_ended.value = new Date().toISOString();\n form._user_session_completed.value = this.player.currentTime() > 0.9 * this.player.duration();\n form._user_session_idempotency_key.value = this.guid();\n this.submitButtonTarget.click();\n this.startedAt = undefined;\n }\n }\n\n reset() {\n this.player.hasStarted(false);\n if (this.player.isFullscreen() && document.fullscreenElement) {\n this.player.exitFullscreen();\n }\n }\n\n // generate a UUID-looking random string, https://stackoverflow.com/a/105074\n guid() {\n const s4 = () => Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);\n return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();\n }\n}\n", "import { Controller } from \"@hotwired/stimulus\"\n\nexport default class extends Controller {\n static targets = [\"overlay\", \"previous\", \"next\"]\n\n connect() {\n this.origBodyOverflowY = document.body.style.overflowY\n this.origBodyHeight = document.body.style.height\n this.origBodyPosition = document.body.style.position\n }\n\n previous() {\n if (this.hasPreviousTarget) {\n this.previousTarget.click()\n }\n }\n\n next() {\n if (this.hasNextTarget) {\n this.nextTarget.click()\n }\n }\n\n close() {\n document.body.style.overflowY = this.origBodyOverflowY\n document.body.style.height = this.origBodyHeight\n document.body.style.position = this.origBodyPosition\n this.element.innerHTML = \"\"\n }\n\n overlayTargetConnected() {\n document.body.style.overflowY = \"hidden\"\n document.body.style.height = \"100%\"\n document.body.style.position = \"relative\"\n }\n}\n", "import { Controller } from \"@hotwired/stimulus\"\n\nexport default class extends Controller {\n static get targets() {\n return [\"wrap\", \"menu\", \"subMenu\"];\n }\n\n open() {\n this.wrapTarget.classList.add(\"move-right\");\n }\n\n close() {\n this.wrapTarget.classList.remove(\"move-right\");\n }\n\n toggle() {\n this.wrapTarget.classList.toggle(\"move-right\");\n }\n\n openSubMenu() {\n this.menuTarget.scroll({ top: 0, behavior: 'smooth' });\n this.subMenuTarget.classList.add(\"move-right\");\n }\n\n closeSubMenu() {\n this.subMenuTarget.classList.remove(\"move-right\");\n }\n}\n", "import { Controller } from \"@hotwired/stimulus\"\nimport Swiper from \"swiper\"\nimport {Navigation} from \"swiper/modules\"\n\nexport default class extends Controller {\n static get targets() {\n return [\"cards\", \"navPrev\", \"navNext\"];\n }\n\n 
static get values() {\n return { wide: { type: Boolean, default: false } };\n }\n\n connect() {\n const swiper = new Swiper(this.cardsTarget, {\n modules: [Navigation],\n centerInsufficientSlides: true,\n spaceBetween: 15,\n breakpoints: this.wideValue ? {\n 640: {slidesPerView: 2},\n 1280: {slidesPerView: 3},\n 1600: {slidesPerView: 4},\n } : {\n 350: {slidesPerView: 2},\n 640: {slidesPerView: 3},\n 1024: {slidesPerView: 4},\n 1280: {slidesPerView: 5},\n 1536: {slidesPerView: 6},\n },\n navigation: {\n nextEl: this.navNextTarget,\n prevEl: this.navPrevTarget,\n }\n });\n this.dispatch(\"init\", { detail: { swiper } });\n this.cardsTarget.classList.remove(\"initializing\");\n }\n\n disconnect() {\n this.cardsTarget.swiper.destroy();\n }\n}\n", "var V3_URL = 'https://js.stripe.com/v3';\nvar V3_URL_REGEX = /^https:\\/\\/js\\.stripe\\.com\\/v3\\/?(\\?.*)?$/;\nvar EXISTING_SCRIPT_MESSAGE = 'loadStripe.setLoadParameters was called but an existing Stripe.js script already exists in the document; existing script parameters will be used';\nvar findScript = function findScript() {\n var scripts = document.querySelectorAll(\"script[src^=\\\"\".concat(V3_URL, \"\\\"]\"));\n\n for (var i = 0; i < scripts.length; i++) {\n var script = scripts[i];\n\n if (!V3_URL_REGEX.test(script.src)) {\n continue;\n }\n\n return script;\n }\n\n return null;\n};\n\nvar injectScript = function injectScript(params) {\n var queryString = params && !params.advancedFraudSignals ? '?advancedFraudSignals=false' : '';\n var script = document.createElement('script');\n script.src = \"\".concat(V3_URL).concat(queryString);\n var headOrBody = document.head || document.body;\n\n if (!headOrBody) {\n throw new Error('Expected document.body not to be null. Stripe.js requires a element.');\n }\n\n headOrBody.appendChild(script);\n return script;\n};\n\nvar registerWrapper = function registerWrapper(stripe, startTime) {\n if (!stripe || !stripe._registerWrapper) {\n return;\n }\n\n stripe._registerWrapper({\n name: 'stripe-js',\n version: \"3.4.1\",\n startTime: startTime\n });\n};\n\nvar stripePromise = null;\nvar onErrorListener = null;\nvar onLoadListener = null;\n\nvar onError = function onError(reject) {\n return function () {\n reject(new Error('Failed to load Stripe.js'));\n };\n};\n\nvar onLoad = function onLoad(resolve, reject) {\n return function () {\n if (window.Stripe) {\n resolve(window.Stripe);\n } else {\n reject(new Error('Stripe.js not available'));\n }\n };\n};\n\nvar loadScript = function loadScript(params) {\n // Ensure that we only attempt to load Stripe.js at most once\n if (stripePromise !== null) {\n return stripePromise;\n }\n\n stripePromise = new Promise(function (resolve, reject) {\n if (typeof window === 'undefined' || typeof document === 'undefined') {\n // Resolve to null when imported server side. 
This makes the module\n // safe to import in an isomorphic code base.\n resolve(null);\n return;\n }\n\n if (window.Stripe && params) {\n console.warn(EXISTING_SCRIPT_MESSAGE);\n }\n\n if (window.Stripe) {\n resolve(window.Stripe);\n return;\n }\n\n try {\n var script = findScript();\n\n if (script && params) {\n console.warn(EXISTING_SCRIPT_MESSAGE);\n } else if (!script) {\n script = injectScript(params);\n } else if (script && onLoadListener !== null && onErrorListener !== null) {\n var _script$parentNode;\n\n // remove event listeners\n script.removeEventListener('load', onLoadListener);\n script.removeEventListener('error', onErrorListener); // if script exists, but we are reloading due to an error,\n // reload script to trigger 'load' event\n\n (_script$parentNode = script.parentNode) === null || _script$parentNode === void 0 ? void 0 : _script$parentNode.removeChild(script);\n script = injectScript(params);\n }\n\n onLoadListener = onLoad(resolve, reject);\n onErrorListener = onError(reject);\n script.addEventListener('load', onLoadListener);\n script.addEventListener('error', onErrorListener);\n } catch (error) {\n reject(error);\n return;\n }\n }); // Resets stripePromise on error\n\n return stripePromise[\"catch\"](function (error) {\n stripePromise = null;\n return Promise.reject(error);\n });\n};\nvar initStripe = function initStripe(maybeStripe, args, startTime) {\n if (maybeStripe === null) {\n return null;\n }\n\n var stripe = maybeStripe.apply(undefined, args);\n registerWrapper(stripe, startTime);\n return stripe;\n}; // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types\n\nvar stripePromise$1;\nvar loadCalled = false;\n\nvar getStripePromise = function getStripePromise() {\n if (stripePromise$1) {\n return stripePromise$1;\n }\n\n stripePromise$1 = loadScript(null)[\"catch\"](function (error) {\n // clear cache on error\n stripePromise$1 = null;\n return Promise.reject(error);\n });\n return stripePromise$1;\n}; // Execute our own script injection after a tick to give users time to do their\n// own script injection.\n\n\nPromise.resolve().then(function () {\n return getStripePromise();\n})[\"catch\"](function (error) {\n if (!loadCalled) {\n console.warn(error);\n }\n});\nvar loadStripe = function loadStripe() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n loadCalled = true;\n var startTime = Date.now(); // if previous attempts are unsuccessful, will re-load script\n\n return getStripePromise().then(function (maybeStripe) {\n return initStripe(maybeStripe, args, startTime);\n });\n};\n\nexport { loadStripe };\n", "import { Controller } from \"@hotwired/stimulus\"\nimport { loadStripe } from \"@stripe/stripe-js\"\nimport { isTurboNative } from \"../lib/environment\"\n\nexport default class extends Controller {\n static targets = [\"checkout\"]\n\n static get shouldLoad() {\n return !isTurboNative\n }\n\n async checkoutTargetConnected(target) {\n const key = target.dataset[\"key\"]\n const clientSecret = target.dataset[\"secret\"]\n target.removeAttribute(\"data-key\")\n target.removeAttribute(\"data-secret\")\n this.stripe = await loadStripe(key)\n this.checkout = await this.stripe.initEmbeddedCheckout({ clientSecret })\n this.checkout.mount(target)\n }\n\n checkoutTargetDisconnected() {\n this.checkout.destroy()\n }\n}\n", "/**\n * Custom positioning reference element.\n * @see https://floating-ui.com/docs/virtual-elements\n */\n\nconst sides = ['top', 'right', 
'bottom', 'left'];\nconst alignments = ['start', 'end'];\nconst placements = /*#__PURE__*/sides.reduce((acc, side) => acc.concat(side, side + \"-\" + alignments[0], side + \"-\" + alignments[1]), []);\nconst min = Math.min;\nconst max = Math.max;\nconst round = Math.round;\nconst floor = Math.floor;\nconst createCoords = v => ({\n x: v,\n y: v\n});\nconst oppositeSideMap = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nconst oppositeAlignmentMap = {\n start: 'end',\n end: 'start'\n};\nfunction clamp(start, value, end) {\n return max(start, min(value, end));\n}\nfunction evaluate(value, param) {\n return typeof value === 'function' ? value(param) : value;\n}\nfunction getSide(placement) {\n return placement.split('-')[0];\n}\nfunction getAlignment(placement) {\n return placement.split('-')[1];\n}\nfunction getOppositeAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}\nfunction getAxisLength(axis) {\n return axis === 'y' ? 'height' : 'width';\n}\nfunction getSideAxis(placement) {\n return ['top', 'bottom'].includes(getSide(placement)) ? 'y' : 'x';\n}\nfunction getAlignmentAxis(placement) {\n return getOppositeAxis(getSideAxis(placement));\n}\nfunction getAlignmentSides(placement, rects, rtl) {\n if (rtl === void 0) {\n rtl = false;\n }\n const alignment = getAlignment(placement);\n const alignmentAxis = getAlignmentAxis(placement);\n const length = getAxisLength(alignmentAxis);\n let mainAlignmentSide = alignmentAxis === 'x' ? alignment === (rtl ? 'end' : 'start') ? 'right' : 'left' : alignment === 'start' ? 'bottom' : 'top';\n if (rects.reference[length] > rects.floating[length]) {\n mainAlignmentSide = getOppositePlacement(mainAlignmentSide);\n }\n return [mainAlignmentSide, getOppositePlacement(mainAlignmentSide)];\n}\nfunction getExpandedPlacements(placement) {\n const oppositePlacement = getOppositePlacement(placement);\n return [getOppositeAlignmentPlacement(placement), oppositePlacement, getOppositeAlignmentPlacement(oppositePlacement)];\n}\nfunction getOppositeAlignmentPlacement(placement) {\n return placement.replace(/start|end/g, alignment => oppositeAlignmentMap[alignment]);\n}\nfunction getSideList(side, isStart, rtl) {\n const lr = ['left', 'right'];\n const rl = ['right', 'left'];\n const tb = ['top', 'bottom'];\n const bt = ['bottom', 'top'];\n switch (side) {\n case 'top':\n case 'bottom':\n if (rtl) return isStart ? rl : lr;\n return isStart ? lr : rl;\n case 'left':\n case 'right':\n return isStart ? tb : bt;\n default:\n return [];\n }\n}\nfunction getOppositeAxisPlacements(placement, flipAlignment, direction, rtl) {\n const alignment = getAlignment(placement);\n let list = getSideList(getSide(placement), direction === 'start', rtl);\n if (alignment) {\n list = list.map(side => side + \"-\" + alignment);\n if (flipAlignment) {\n list = list.concat(list.map(getOppositeAlignmentPlacement));\n }\n }\n return list;\n}\nfunction getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, side => oppositeSideMap[side]);\n}\nfunction expandPaddingObject(padding) {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0,\n ...padding\n };\n}\nfunction getPaddingObject(padding) {\n return typeof padding !== 'number' ? 
expandPaddingObject(padding) : {\n top: padding,\n right: padding,\n bottom: padding,\n left: padding\n };\n}\nfunction rectToClientRect(rect) {\n const {\n x,\n y,\n width,\n height\n } = rect;\n return {\n width,\n height,\n top: y,\n left: x,\n right: x + width,\n bottom: y + height,\n x,\n y\n };\n}\n\nexport { alignments, clamp, createCoords, evaluate, expandPaddingObject, floor, getAlignment, getAlignmentAxis, getAlignmentSides, getAxisLength, getExpandedPlacements, getOppositeAlignmentPlacement, getOppositeAxis, getOppositeAxisPlacements, getOppositePlacement, getPaddingObject, getSide, getSideAxis, max, min, placements, rectToClientRect, round, sides };\n", "import { getSideAxis, getAlignmentAxis, getAxisLength, getSide, getAlignment, evaluate, getPaddingObject, rectToClientRect, min, clamp, placements, getAlignmentSides, getOppositeAlignmentPlacement, getOppositePlacement, getExpandedPlacements, getOppositeAxisPlacements, sides, max, getOppositeAxis } from '@floating-ui/utils';\nexport { rectToClientRect } from '@floating-ui/utils';\n\nfunction computeCoordsFromPlacement(_ref, placement, rtl) {\n let {\n reference,\n floating\n } = _ref;\n const sideAxis = getSideAxis(placement);\n const alignmentAxis = getAlignmentAxis(placement);\n const alignLength = getAxisLength(alignmentAxis);\n const side = getSide(placement);\n const isVertical = sideAxis === 'y';\n const commonX = reference.x + reference.width / 2 - floating.width / 2;\n const commonY = reference.y + reference.height / 2 - floating.height / 2;\n const commonAlign = reference[alignLength] / 2 - floating[alignLength] / 2;\n let coords;\n switch (side) {\n case 'top':\n coords = {\n x: commonX,\n y: reference.y - floating.height\n };\n break;\n case 'bottom':\n coords = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n case 'right':\n coords = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n case 'left':\n coords = {\n x: reference.x - floating.width,\n y: commonY\n };\n break;\n default:\n coords = {\n x: reference.x,\n y: reference.y\n };\n }\n switch (getAlignment(placement)) {\n case 'start':\n coords[alignmentAxis] -= commonAlign * (rtl && isVertical ? -1 : 1);\n break;\n case 'end':\n coords[alignmentAxis] += commonAlign * (rtl && isVertical ? -1 : 1);\n break;\n }\n return coords;\n}\n\n/**\n * Computes the `x` and `y` coordinates that will place the floating element\n * next to a given reference element.\n *\n * This export does not have any `platform` interface logic. You will need to\n * write one for the platform you are using Floating UI with.\n */\nconst computePosition = async (reference, floating, config) => {\n const {\n placement = 'bottom',\n strategy = 'absolute',\n middleware = [],\n platform\n } = config;\n const validMiddleware = middleware.filter(Boolean);\n const rtl = await (platform.isRTL == null ? void 0 : platform.isRTL(floating));\n let rects = await platform.getElementRects({\n reference,\n floating,\n strategy\n });\n let {\n x,\n y\n } = computeCoordsFromPlacement(rects, placement, rtl);\n let statefulPlacement = placement;\n let middlewareData = {};\n let resetCount = 0;\n for (let i = 0; i < validMiddleware.length; i++) {\n const {\n name,\n fn\n } = validMiddleware[i];\n const {\n x: nextX,\n y: nextY,\n data,\n reset\n } = await fn({\n x,\n y,\n initialPlacement: placement,\n placement: statefulPlacement,\n strategy,\n middlewareData,\n rects,\n platform,\n elements: {\n reference,\n floating\n }\n });\n x = nextX != null ? 
nextX : x;\n y = nextY != null ? nextY : y;\n middlewareData = {\n ...middlewareData,\n [name]: {\n ...middlewareData[name],\n ...data\n }\n };\n if (reset && resetCount <= 50) {\n resetCount++;\n if (typeof reset === 'object') {\n if (reset.placement) {\n statefulPlacement = reset.placement;\n }\n if (reset.rects) {\n rects = reset.rects === true ? await platform.getElementRects({\n reference,\n floating,\n strategy\n }) : reset.rects;\n }\n ({\n x,\n y\n } = computeCoordsFromPlacement(rects, statefulPlacement, rtl));\n }\n i = -1;\n }\n }\n return {\n x,\n y,\n placement: statefulPlacement,\n strategy,\n middlewareData\n };\n};\n\n/**\n * Resolves with an object of overflow side offsets that determine how much the\n * element is overflowing a given clipping boundary on each side.\n * - positive = overflowing the boundary by that number of pixels\n * - negative = how many pixels left before it will overflow\n * - 0 = lies flush with the boundary\n * @see https://floating-ui.com/docs/detectOverflow\n */\nasync function detectOverflow(state, options) {\n var _await$platform$isEle;\n if (options === void 0) {\n options = {};\n }\n const {\n x,\n y,\n platform,\n rects,\n elements,\n strategy\n } = state;\n const {\n boundary = 'clippingAncestors',\n rootBoundary = 'viewport',\n elementContext = 'floating',\n altBoundary = false,\n padding = 0\n } = evaluate(options, state);\n const paddingObject = getPaddingObject(padding);\n const altContext = elementContext === 'floating' ? 'reference' : 'floating';\n const element = elements[altBoundary ? altContext : elementContext];\n const clippingClientRect = rectToClientRect(await platform.getClippingRect({\n element: ((_await$platform$isEle = await (platform.isElement == null ? void 0 : platform.isElement(element))) != null ? _await$platform$isEle : true) ? element : element.contextElement || (await (platform.getDocumentElement == null ? void 0 : platform.getDocumentElement(elements.floating))),\n boundary,\n rootBoundary,\n strategy\n }));\n const rect = elementContext === 'floating' ? {\n x,\n y,\n width: rects.floating.width,\n height: rects.floating.height\n } : rects.reference;\n const offsetParent = await (platform.getOffsetParent == null ? void 0 : platform.getOffsetParent(elements.floating));\n const offsetScale = (await (platform.isElement == null ? void 0 : platform.isElement(offsetParent))) ? (await (platform.getScale == null ? void 0 : platform.getScale(offsetParent))) || {\n x: 1,\n y: 1\n } : {\n x: 1,\n y: 1\n };\n const elementClientRect = rectToClientRect(platform.convertOffsetParentRelativeRectToViewportRelativeRect ? 
await platform.convertOffsetParentRelativeRectToViewportRelativeRect({\n elements,\n rect,\n offsetParent,\n strategy\n }) : rect);\n return {\n top: (clippingClientRect.top - elementClientRect.top + paddingObject.top) / offsetScale.y,\n bottom: (elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom) / offsetScale.y,\n left: (clippingClientRect.left - elementClientRect.left + paddingObject.left) / offsetScale.x,\n right: (elementClientRect.right - clippingClientRect.right + paddingObject.right) / offsetScale.x\n };\n}\n\n/**\n * Provides data to position an inner element of the floating element so that it\n * appears centered to the reference element.\n * @see https://floating-ui.com/docs/arrow\n */\nconst arrow = options => ({\n name: 'arrow',\n options,\n async fn(state) {\n const {\n x,\n y,\n placement,\n rects,\n platform,\n elements,\n middlewareData\n } = state;\n // Since `element` is required, we don't Partial<> the type.\n const {\n element,\n padding = 0\n } = evaluate(options, state) || {};\n if (element == null) {\n return {};\n }\n const paddingObject = getPaddingObject(padding);\n const coords = {\n x,\n y\n };\n const axis = getAlignmentAxis(placement);\n const length = getAxisLength(axis);\n const arrowDimensions = await platform.getDimensions(element);\n const isYAxis = axis === 'y';\n const minProp = isYAxis ? 'top' : 'left';\n const maxProp = isYAxis ? 'bottom' : 'right';\n const clientProp = isYAxis ? 'clientHeight' : 'clientWidth';\n const endDiff = rects.reference[length] + rects.reference[axis] - coords[axis] - rects.floating[length];\n const startDiff = coords[axis] - rects.reference[axis];\n const arrowOffsetParent = await (platform.getOffsetParent == null ? void 0 : platform.getOffsetParent(element));\n let clientSize = arrowOffsetParent ? arrowOffsetParent[clientProp] : 0;\n\n // DOM platform can return `window` as the `offsetParent`.\n if (!clientSize || !(await (platform.isElement == null ? void 0 : platform.isElement(arrowOffsetParent)))) {\n clientSize = elements.floating[clientProp] || rects.floating[length];\n }\n const centerToReference = endDiff / 2 - startDiff / 2;\n\n // If the padding is large enough that it causes the arrow to no longer be\n // centered, modify the padding so that it is centered.\n const largestPossiblePadding = clientSize / 2 - arrowDimensions[length] / 2 - 1;\n const minPadding = min(paddingObject[minProp], largestPossiblePadding);\n const maxPadding = min(paddingObject[maxProp], largestPossiblePadding);\n\n // Make sure the arrow doesn't overflow the floating element if the center\n // point is outside the floating element's bounds.\n const min$1 = minPadding;\n const max = clientSize - arrowDimensions[length] - maxPadding;\n const center = clientSize / 2 - arrowDimensions[length] / 2 + centerToReference;\n const offset = clamp(min$1, center, max);\n\n // If the reference is small enough that the arrow's padding causes it to\n // to point to nothing for an aligned placement, adjust the offset of the\n // floating element itself. To ensure `shift()` continues to take action,\n // a single reset is performed when this is true.\n const shouldAddOffset = !middlewareData.arrow && getAlignment(placement) != null && center !== offset && rects.reference[length] / 2 - (center < min$1 ? minPadding : maxPadding) - arrowDimensions[length] / 2 < 0;\n const alignmentOffset = shouldAddOffset ? center < min$1 ? 
center - min$1 : center - max : 0;\n return {\n [axis]: coords[axis] + alignmentOffset,\n data: {\n [axis]: offset,\n centerOffset: center - offset - alignmentOffset,\n ...(shouldAddOffset && {\n alignmentOffset\n })\n },\n reset: shouldAddOffset\n };\n }\n});\n\nfunction getPlacementList(alignment, autoAlignment, allowedPlacements) {\n const allowedPlacementsSortedByAlignment = alignment ? [...allowedPlacements.filter(placement => getAlignment(placement) === alignment), ...allowedPlacements.filter(placement => getAlignment(placement) !== alignment)] : allowedPlacements.filter(placement => getSide(placement) === placement);\n return allowedPlacementsSortedByAlignment.filter(placement => {\n if (alignment) {\n return getAlignment(placement) === alignment || (autoAlignment ? getOppositeAlignmentPlacement(placement) !== placement : false);\n }\n return true;\n });\n}\n/**\n * Optimizes the visibility of the floating element by choosing the placement\n * that has the most space available automatically, without needing to specify a\n * preferred placement. Alternative to `flip`.\n * @see https://floating-ui.com/docs/autoPlacement\n */\nconst autoPlacement = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n name: 'autoPlacement',\n options,\n async fn(state) {\n var _middlewareData$autoP, _middlewareData$autoP2, _placementsThatFitOnE;\n const {\n rects,\n middlewareData,\n placement,\n platform,\n elements\n } = state;\n const {\n crossAxis = false,\n alignment,\n allowedPlacements = placements,\n autoAlignment = true,\n ...detectOverflowOptions\n } = evaluate(options, state);\n const placements$1 = alignment !== undefined || allowedPlacements === placements ? getPlacementList(alignment || null, autoAlignment, allowedPlacements) : allowedPlacements;\n const overflow = await detectOverflow(state, detectOverflowOptions);\n const currentIndex = ((_middlewareData$autoP = middlewareData.autoPlacement) == null ? void 0 : _middlewareData$autoP.index) || 0;\n const currentPlacement = placements$1[currentIndex];\n if (currentPlacement == null) {\n return {};\n }\n const alignmentSides = getAlignmentSides(currentPlacement, rects, await (platform.isRTL == null ? void 0 : platform.isRTL(elements.floating)));\n\n // Make `computeCoords` start from the right place.\n if (placement !== currentPlacement) {\n return {\n reset: {\n placement: placements$1[0]\n }\n };\n }\n const currentOverflows = [overflow[getSide(currentPlacement)], overflow[alignmentSides[0]], overflow[alignmentSides[1]]];\n const allOverflows = [...(((_middlewareData$autoP2 = middlewareData.autoPlacement) == null ? 
void 0 : _middlewareData$autoP2.overflows) || []), {\n placement: currentPlacement,\n overflows: currentOverflows\n }];\n const nextPlacement = placements$1[currentIndex + 1];\n\n // There are more placements to check.\n if (nextPlacement) {\n return {\n data: {\n index: currentIndex + 1,\n overflows: allOverflows\n },\n reset: {\n placement: nextPlacement\n }\n };\n }\n const placementsSortedByMostSpace = allOverflows.map(d => {\n const alignment = getAlignment(d.placement);\n return [d.placement, alignment && crossAxis ?\n // Check along the mainAxis and main crossAxis side.\n d.overflows.slice(0, 2).reduce((acc, v) => acc + v, 0) :\n // Check only the mainAxis.\n d.overflows[0], d.overflows];\n }).sort((a, b) => a[1] - b[1]);\n const placementsThatFitOnEachSide = placementsSortedByMostSpace.filter(d => d[2].slice(0,\n // Aligned placements should not check their opposite crossAxis\n // side.\n getAlignment(d[0]) ? 2 : 3).every(v => v <= 0));\n const resetPlacement = ((_placementsThatFitOnE = placementsThatFitOnEachSide[0]) == null ? void 0 : _placementsThatFitOnE[0]) || placementsSortedByMostSpace[0][0];\n if (resetPlacement !== placement) {\n return {\n data: {\n index: currentIndex + 1,\n overflows: allOverflows\n },\n reset: {\n placement: resetPlacement\n }\n };\n }\n return {};\n }\n };\n};\n\n/**\n * Optimizes the visibility of the floating element by flipping the `placement`\n * in order to keep it in view when the preferred placement(s) will overflow the\n * clipping boundary. Alternative to `autoPlacement`.\n * @see https://floating-ui.com/docs/flip\n */\nconst flip = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n name: 'flip',\n options,\n async fn(state) {\n var _middlewareData$arrow, _middlewareData$flip;\n const {\n placement,\n middlewareData,\n rects,\n initialPlacement,\n platform,\n elements\n } = state;\n const {\n mainAxis: checkMainAxis = true,\n crossAxis: checkCrossAxis = true,\n fallbackPlacements: specifiedFallbackPlacements,\n fallbackStrategy = 'bestFit',\n fallbackAxisSideDirection = 'none',\n flipAlignment = true,\n ...detectOverflowOptions\n } = evaluate(options, state);\n\n // If a reset by the arrow was caused due to an alignment offset being\n // added, we should skip any logic now since `flip()` has already done its\n // work.\n // https://github.com/floating-ui/floating-ui/issues/2549#issuecomment-1719601643\n if ((_middlewareData$arrow = middlewareData.arrow) != null && _middlewareData$arrow.alignmentOffset) {\n return {};\n }\n const side = getSide(placement);\n const initialSideAxis = getSideAxis(initialPlacement);\n const isBasePlacement = getSide(initialPlacement) === initialPlacement;\n const rtl = await (platform.isRTL == null ? void 0 : platform.isRTL(elements.floating));\n const fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipAlignment ? [getOppositePlacement(initialPlacement)] : getExpandedPlacements(initialPlacement));\n const hasFallbackAxisSideDirection = fallbackAxisSideDirection !== 'none';\n if (!specifiedFallbackPlacements && hasFallbackAxisSideDirection) {\n fallbackPlacements.push(...getOppositeAxisPlacements(initialPlacement, flipAlignment, fallbackAxisSideDirection, rtl));\n }\n const placements = [initialPlacement, ...fallbackPlacements];\n const overflow = await detectOverflow(state, detectOverflowOptions);\n const overflows = [];\n let overflowsData = ((_middlewareData$flip = middlewareData.flip) == null ? 
void 0 : _middlewareData$flip.overflows) || [];\n if (checkMainAxis) {\n overflows.push(overflow[side]);\n }\n if (checkCrossAxis) {\n const sides = getAlignmentSides(placement, rects, rtl);\n overflows.push(overflow[sides[0]], overflow[sides[1]]);\n }\n overflowsData = [...overflowsData, {\n placement,\n overflows\n }];\n\n // One or more sides is overflowing.\n if (!overflows.every(side => side <= 0)) {\n var _middlewareData$flip2, _overflowsData$filter;\n const nextIndex = (((_middlewareData$flip2 = middlewareData.flip) == null ? void 0 : _middlewareData$flip2.index) || 0) + 1;\n const nextPlacement = placements[nextIndex];\n if (nextPlacement) {\n // Try next placement and re-run the lifecycle.\n return {\n data: {\n index: nextIndex,\n overflows: overflowsData\n },\n reset: {\n placement: nextPlacement\n }\n };\n }\n\n // First, find the candidates that fit on the mainAxis side of overflow,\n // then find the placement that fits the best on the main crossAxis side.\n let resetPlacement = (_overflowsData$filter = overflowsData.filter(d => d.overflows[0] <= 0).sort((a, b) => a.overflows[1] - b.overflows[1])[0]) == null ? void 0 : _overflowsData$filter.placement;\n\n // Otherwise fallback.\n if (!resetPlacement) {\n switch (fallbackStrategy) {\n case 'bestFit':\n {\n var _overflowsData$filter2;\n const placement = (_overflowsData$filter2 = overflowsData.filter(d => {\n if (hasFallbackAxisSideDirection) {\n const currentSideAxis = getSideAxis(d.placement);\n return currentSideAxis === initialSideAxis ||\n // Create a bias to the `y` side axis due to horizontal\n // reading directions favoring greater width.\n currentSideAxis === 'y';\n }\n return true;\n }).map(d => [d.placement, d.overflows.filter(overflow => overflow > 0).reduce((acc, overflow) => acc + overflow, 0)]).sort((a, b) => a[1] - b[1])[0]) == null ? 
void 0 : _overflowsData$filter2[0];\n if (placement) {\n resetPlacement = placement;\n }\n break;\n }\n case 'initialPlacement':\n resetPlacement = initialPlacement;\n break;\n }\n }\n if (placement !== resetPlacement) {\n return {\n reset: {\n placement: resetPlacement\n }\n };\n }\n }\n return {};\n }\n };\n};\n\nfunction getSideOffsets(overflow, rect) {\n return {\n top: overflow.top - rect.height,\n right: overflow.right - rect.width,\n bottom: overflow.bottom - rect.height,\n left: overflow.left - rect.width\n };\n}\nfunction isAnySideFullyClipped(overflow) {\n return sides.some(side => overflow[side] >= 0);\n}\n/**\n * Provides data to hide the floating element in applicable situations, such as\n * when it is not in the same clipping context as the reference element.\n * @see https://floating-ui.com/docs/hide\n */\nconst hide = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n name: 'hide',\n options,\n async fn(state) {\n const {\n rects\n } = state;\n const {\n strategy = 'referenceHidden',\n ...detectOverflowOptions\n } = evaluate(options, state);\n switch (strategy) {\n case 'referenceHidden':\n {\n const overflow = await detectOverflow(state, {\n ...detectOverflowOptions,\n elementContext: 'reference'\n });\n const offsets = getSideOffsets(overflow, rects.reference);\n return {\n data: {\n referenceHiddenOffsets: offsets,\n referenceHidden: isAnySideFullyClipped(offsets)\n }\n };\n }\n case 'escaped':\n {\n const overflow = await detectOverflow(state, {\n ...detectOverflowOptions,\n altBoundary: true\n });\n const offsets = getSideOffsets(overflow, rects.floating);\n return {\n data: {\n escapedOffsets: offsets,\n escaped: isAnySideFullyClipped(offsets)\n }\n };\n }\n default:\n {\n return {};\n }\n }\n }\n };\n};\n\nfunction getBoundingRect(rects) {\n const minX = min(...rects.map(rect => rect.left));\n const minY = min(...rects.map(rect => rect.top));\n const maxX = max(...rects.map(rect => rect.right));\n const maxY = max(...rects.map(rect => rect.bottom));\n return {\n x: minX,\n y: minY,\n width: maxX - minX,\n height: maxY - minY\n };\n}\nfunction getRectsByLine(rects) {\n const sortedRects = rects.slice().sort((a, b) => a.y - b.y);\n const groups = [];\n let prevRect = null;\n for (let i = 0; i < sortedRects.length; i++) {\n const rect = sortedRects[i];\n if (!prevRect || rect.y - prevRect.y > prevRect.height / 2) {\n groups.push([rect]);\n } else {\n groups[groups.length - 1].push(rect);\n }\n prevRect = rect;\n }\n return groups.map(rect => rectToClientRect(getBoundingRect(rect)));\n}\n/**\n * Provides improved positioning for inline reference elements that can span\n * over multiple lines, such as hyperlinks or range selections.\n * @see https://floating-ui.com/docs/inline\n */\nconst inline = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n name: 'inline',\n options,\n async fn(state) {\n const {\n placement,\n elements,\n rects,\n platform,\n strategy\n } = state;\n // A MouseEvent's client{X,Y} coords can be up to 2 pixels off a\n // ClientRect's bounds, despite the event listener being triggered. A\n // padding of 2 seems to handle this issue.\n const {\n padding = 2,\n x,\n y\n } = evaluate(options, state);\n const nativeClientRects = Array.from((await (platform.getClientRects == null ? 
void 0 : platform.getClientRects(elements.reference))) || []);\n const clientRects = getRectsByLine(nativeClientRects);\n const fallback = rectToClientRect(getBoundingRect(nativeClientRects));\n const paddingObject = getPaddingObject(padding);\n function getBoundingClientRect() {\n // There are two rects and they are disjoined.\n if (clientRects.length === 2 && clientRects[0].left > clientRects[1].right && x != null && y != null) {\n // Find the first rect in which the point is fully inside.\n return clientRects.find(rect => x > rect.left - paddingObject.left && x < rect.right + paddingObject.right && y > rect.top - paddingObject.top && y < rect.bottom + paddingObject.bottom) || fallback;\n }\n\n // There are 2 or more connected rects.\n if (clientRects.length >= 2) {\n if (getSideAxis(placement) === 'y') {\n const firstRect = clientRects[0];\n const lastRect = clientRects[clientRects.length - 1];\n const isTop = getSide(placement) === 'top';\n const top = firstRect.top;\n const bottom = lastRect.bottom;\n const left = isTop ? firstRect.left : lastRect.left;\n const right = isTop ? firstRect.right : lastRect.right;\n const width = right - left;\n const height = bottom - top;\n return {\n top,\n bottom,\n left,\n right,\n width,\n height,\n x: left,\n y: top\n };\n }\n const isLeftSide = getSide(placement) === 'left';\n const maxRight = max(...clientRects.map(rect => rect.right));\n const minLeft = min(...clientRects.map(rect => rect.left));\n const measureRects = clientRects.filter(rect => isLeftSide ? rect.left === minLeft : rect.right === maxRight);\n const top = measureRects[0].top;\n const bottom = measureRects[measureRects.length - 1].bottom;\n const left = minLeft;\n const right = maxRight;\n const width = right - left;\n const height = bottom - top;\n return {\n top,\n bottom,\n left,\n right,\n width,\n height,\n x: left,\n y: top\n };\n }\n return fallback;\n }\n const resetRects = await platform.getElementRects({\n reference: {\n getBoundingClientRect\n },\n floating: elements.floating,\n strategy\n });\n if (rects.reference.x !== resetRects.reference.x || rects.reference.y !== resetRects.reference.y || rects.reference.width !== resetRects.reference.width || rects.reference.height !== resetRects.reference.height) {\n return {\n reset: {\n rects: resetRects\n }\n };\n }\n return {};\n }\n };\n};\n\n// For type backwards-compatibility, the `OffsetOptions` type was also\n// Derivable.\n\nasync function convertValueToCoords(state, options) {\n const {\n placement,\n platform,\n elements\n } = state;\n const rtl = await (platform.isRTL == null ? void 0 : platform.isRTL(elements.floating));\n const side = getSide(placement);\n const alignment = getAlignment(placement);\n const isVertical = getSideAxis(placement) === 'y';\n const mainAxisMulti = ['left', 'top'].includes(side) ? -1 : 1;\n const crossAxisMulti = rtl && isVertical ? -1 : 1;\n const rawValue = evaluate(options, state);\n\n // eslint-disable-next-line prefer-const\n let {\n mainAxis,\n crossAxis,\n alignmentAxis\n } = typeof rawValue === 'number' ? {\n mainAxis: rawValue,\n crossAxis: 0,\n alignmentAxis: null\n } : {\n mainAxis: 0,\n crossAxis: 0,\n alignmentAxis: null,\n ...rawValue\n };\n if (alignment && typeof alignmentAxis === 'number') {\n crossAxis = alignment === 'end' ? alignmentAxis * -1 : alignmentAxis;\n }\n return isVertical ? 
{\n x: crossAxis * crossAxisMulti,\n y: mainAxis * mainAxisMulti\n } : {\n x: mainAxis * mainAxisMulti,\n y: crossAxis * crossAxisMulti\n };\n}\n\n/**\n * Modifies the placement by translating the floating element along the\n * specified axes.\n * A number (shorthand for `mainAxis` or distance), or an axes configuration\n * object may be passed.\n * @see https://floating-ui.com/docs/offset\n */\nconst offset = function (options) {\n if (options === void 0) {\n options = 0;\n }\n return {\n name: 'offset',\n options,\n async fn(state) {\n var _middlewareData$offse, _middlewareData$arrow;\n const {\n x,\n y,\n placement,\n middlewareData\n } = state;\n const diffCoords = await convertValueToCoords(state, options);\n\n // If the placement is the same and the arrow caused an alignment offset\n // then we don't need to change the positioning coordinates.\n if (placement === ((_middlewareData$offse = middlewareData.offset) == null ? void 0 : _middlewareData$offse.placement) && (_middlewareData$arrow = middlewareData.arrow) != null && _middlewareData$arrow.alignmentOffset) {\n return {};\n }\n return {\n x: x + diffCoords.x,\n y: y + diffCoords.y,\n data: {\n ...diffCoords,\n placement\n }\n };\n }\n };\n};\n\n/**\n * Optimizes the visibility of the floating element by shifting it in order to\n * keep it in view when it will overflow the clipping boundary.\n * @see https://floating-ui.com/docs/shift\n */\nconst shift = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n name: 'shift',\n options,\n async fn(state) {\n const {\n x,\n y,\n placement\n } = state;\n const {\n mainAxis: checkMainAxis = true,\n crossAxis: checkCrossAxis = false,\n limiter = {\n fn: _ref => {\n let {\n x,\n y\n } = _ref;\n return {\n x,\n y\n };\n }\n },\n ...detectOverflowOptions\n } = evaluate(options, state);\n const coords = {\n x,\n y\n };\n const overflow = await detectOverflow(state, detectOverflowOptions);\n const crossAxis = getSideAxis(getSide(placement));\n const mainAxis = getOppositeAxis(crossAxis);\n let mainAxisCoord = coords[mainAxis];\n let crossAxisCoord = coords[crossAxis];\n if (checkMainAxis) {\n const minSide = mainAxis === 'y' ? 'top' : 'left';\n const maxSide = mainAxis === 'y' ? 'bottom' : 'right';\n const min = mainAxisCoord + overflow[minSide];\n const max = mainAxisCoord - overflow[maxSide];\n mainAxisCoord = clamp(min, mainAxisCoord, max);\n }\n if (checkCrossAxis) {\n const minSide = crossAxis === 'y' ? 'top' : 'left';\n const maxSide = crossAxis === 'y' ? 
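// Usage sketch for the `offset` middleware defined above (illustrative;
// `referenceEl`/`floatingEl` as in the earlier sketches). A bare number is
// shorthand for the main-axis distance; the object form sets each axis.
import { computePosition, offset } from '@floating-ui/dom';

computePosition(referenceEl, floatingEl, {
  placement: 'bottom-start',
  middleware: [offset({ mainAxis: 8, crossAxis: 4 })], // or simply offset(8)
}).then(({ x, y }) => {
  Object.assign(floatingEl.style, { left: `${x}px`, top: `${y}px` });
});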
'bottom' : 'right';\n const min = crossAxisCoord + overflow[minSide];\n const max = crossAxisCoord - overflow[maxSide];\n crossAxisCoord = clamp(min, crossAxisCoord, max);\n }\n const limitedCoords = limiter.fn({\n ...state,\n [mainAxis]: mainAxisCoord,\n [crossAxis]: crossAxisCoord\n });\n return {\n ...limitedCoords,\n data: {\n x: limitedCoords.x - x,\n y: limitedCoords.y - y\n }\n };\n }\n };\n};\n/**\n * Built-in `limiter` that will stop `shift()` at a certain point.\n */\nconst limitShift = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n options,\n fn(state) {\n const {\n x,\n y,\n placement,\n rects,\n middlewareData\n } = state;\n const {\n offset = 0,\n mainAxis: checkMainAxis = true,\n crossAxis: checkCrossAxis = true\n } = evaluate(options, state);\n const coords = {\n x,\n y\n };\n const crossAxis = getSideAxis(placement);\n const mainAxis = getOppositeAxis(crossAxis);\n let mainAxisCoord = coords[mainAxis];\n let crossAxisCoord = coords[crossAxis];\n const rawOffset = evaluate(offset, state);\n const computedOffset = typeof rawOffset === 'number' ? {\n mainAxis: rawOffset,\n crossAxis: 0\n } : {\n mainAxis: 0,\n crossAxis: 0,\n ...rawOffset\n };\n if (checkMainAxis) {\n const len = mainAxis === 'y' ? 'height' : 'width';\n const limitMin = rects.reference[mainAxis] - rects.floating[len] + computedOffset.mainAxis;\n const limitMax = rects.reference[mainAxis] + rects.reference[len] - computedOffset.mainAxis;\n if (mainAxisCoord < limitMin) {\n mainAxisCoord = limitMin;\n } else if (mainAxisCoord > limitMax) {\n mainAxisCoord = limitMax;\n }\n }\n if (checkCrossAxis) {\n var _middlewareData$offse, _middlewareData$offse2;\n const len = mainAxis === 'y' ? 'width' : 'height';\n const isOriginSide = ['top', 'left'].includes(getSide(placement));\n const limitMin = rects.reference[crossAxis] - rects.floating[len] + (isOriginSide ? ((_middlewareData$offse = middlewareData.offset) == null ? void 0 : _middlewareData$offse[crossAxis]) || 0 : 0) + (isOriginSide ? 0 : computedOffset.crossAxis);\n const limitMax = rects.reference[crossAxis] + rects.reference[len] + (isOriginSide ? 0 : ((_middlewareData$offse2 = middlewareData.offset) == null ? void 0 : _middlewareData$offse2[crossAxis]) || 0) - (isOriginSide ? computedOffset.crossAxis : 0);\n if (crossAxisCoord < limitMin) {\n crossAxisCoord = limitMin;\n } else if (crossAxisCoord > limitMax) {\n crossAxisCoord = limitMax;\n }\n }\n return {\n [mainAxis]: mainAxisCoord,\n [crossAxis]: crossAxisCoord\n };\n }\n };\n};\n\n/**\n * Provides data that allows you to change the size of the floating element \u2014\n * for instance, prevent it from overflowing the clipping boundary or match the\n * width of the reference element.\n * @see https://floating-ui.com/docs/size\n */\nconst size = function (options) {\n if (options === void 0) {\n options = {};\n }\n return {\n name: 'size',\n options,\n async fn(state) {\n const {\n placement,\n rects,\n platform,\n elements\n } = state;\n const {\n apply = () => {},\n ...detectOverflowOptions\n } = evaluate(options, state);\n const overflow = await detectOverflow(state, detectOverflowOptions);\n const side = getSide(placement);\n const alignment = getAlignment(placement);\n const isYAxis = getSideAxis(placement) === 'y';\n const {\n width,\n height\n } = rects.floating;\n let heightSide;\n let widthSide;\n if (side === 'top' || side === 'bottom') {\n heightSide = side;\n widthSide = alignment === ((await (platform.isRTL == null ? 
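// Usage sketch pairing `shift` with its built-in `limitShift` limiter
// (illustrative; element names as in the earlier sketches). `padding` keeps
// a 5px gap from the boundary, and the limiter stops shifting once the
// element would detach from the reference, per the clamp logic above.
import { computePosition, shift, limitShift } from '@floating-ui/dom';

computePosition(referenceEl, floatingEl, {
  middleware: [shift({ padding: 5, limiter: limitShift({ offset: 10 }) })],
}).then(({ x, y }) => {
  Object.assign(floatingEl.style, { left: `${x}px`, top: `${y}px` });
});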
void 0 : platform.isRTL(elements.floating))) ? 'start' : 'end') ? 'left' : 'right';\n } else {\n widthSide = side;\n heightSide = alignment === 'end' ? 'top' : 'bottom';\n }\n const maximumClippingHeight = height - overflow.top - overflow.bottom;\n const maximumClippingWidth = width - overflow.left - overflow.right;\n const overflowAvailableHeight = min(height - overflow[heightSide], maximumClippingHeight);\n const overflowAvailableWidth = min(width - overflow[widthSide], maximumClippingWidth);\n const noShift = !state.middlewareData.shift;\n let availableHeight = overflowAvailableHeight;\n let availableWidth = overflowAvailableWidth;\n if (isYAxis) {\n availableWidth = alignment || noShift ? min(overflowAvailableWidth, maximumClippingWidth) : maximumClippingWidth;\n } else {\n availableHeight = alignment || noShift ? min(overflowAvailableHeight, maximumClippingHeight) : maximumClippingHeight;\n }\n if (noShift && !alignment) {\n const xMin = max(overflow.left, 0);\n const xMax = max(overflow.right, 0);\n const yMin = max(overflow.top, 0);\n const yMax = max(overflow.bottom, 0);\n if (isYAxis) {\n availableWidth = width - 2 * (xMin !== 0 || xMax !== 0 ? xMin + xMax : max(overflow.left, overflow.right));\n } else {\n availableHeight = height - 2 * (yMin !== 0 || yMax !== 0 ? yMin + yMax : max(overflow.top, overflow.bottom));\n }\n }\n await apply({\n ...state,\n availableWidth,\n availableHeight\n });\n const nextDimensions = await platform.getDimensions(elements.floating);\n if (width !== nextDimensions.width || height !== nextDimensions.height) {\n return {\n reset: {\n rects: true\n }\n };\n }\n return {};\n }\n };\n};\n\nexport { arrow, autoPlacement, computePosition, detectOverflow, flip, hide, inline, limitShift, offset, shift, size };\n", "function getNodeName(node) {\n if (isNode(node)) {\n return (node.nodeName || '').toLowerCase();\n }\n // Mocked nodes in testing environments may not be instances of Node. By\n // returning `#document` an infinite loop won't occur.\n // https://github.com/floating-ui/floating-ui/issues/2317\n return '#document';\n}\nfunction getWindow(node) {\n var _node$ownerDocument;\n return (node == null || (_node$ownerDocument = node.ownerDocument) == null ? void 0 : _node$ownerDocument.defaultView) || window;\n}\nfunction getDocumentElement(node) {\n var _ref;\n return (_ref = (isNode(node) ? node.ownerDocument : node.document) || window.document) == null ? 
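// Usage sketch for the `size` middleware above (illustrative): constrain a
// dropdown to the available space derived from the overflow math.
import { computePosition, size } from '@floating-ui/dom';

computePosition(referenceEl, floatingEl, {
  middleware: [
    size({
      padding: 8,
      apply({ availableWidth, availableHeight, elements }) {
        // `apply` receives the clamped dimensions resolved above.
        Object.assign(elements.floating.style, {
          maxWidth: `${availableWidth}px`,
          maxHeight: `${availableHeight}px`,
        });
      },
    }),
  ],
});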
void 0 : _ref.documentElement;\n}\nfunction isNode(value) {\n return value instanceof Node || value instanceof getWindow(value).Node;\n}\nfunction isElement(value) {\n return value instanceof Element || value instanceof getWindow(value).Element;\n}\nfunction isHTMLElement(value) {\n return value instanceof HTMLElement || value instanceof getWindow(value).HTMLElement;\n}\nfunction isShadowRoot(value) {\n // Browsers without `ShadowRoot` support.\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n return value instanceof ShadowRoot || value instanceof getWindow(value).ShadowRoot;\n}\nfunction isOverflowElement(element) {\n const {\n overflow,\n overflowX,\n overflowY,\n display\n } = getComputedStyle(element);\n return /auto|scroll|overlay|hidden|clip/.test(overflow + overflowY + overflowX) && !['inline', 'contents'].includes(display);\n}\nfunction isTableElement(element) {\n return ['table', 'td', 'th'].includes(getNodeName(element));\n}\nfunction isTopLayer(element) {\n return [':popover-open', ':modal'].some(selector => {\n try {\n return element.matches(selector);\n } catch (e) {\n return false;\n }\n });\n}\nfunction isContainingBlock(elementOrCss) {\n const webkit = isWebKit();\n const css = isElement(elementOrCss) ? getComputedStyle(elementOrCss) : elementOrCss;\n\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n return css.transform !== 'none' || css.perspective !== 'none' || (css.containerType ? css.containerType !== 'normal' : false) || !webkit && (css.backdropFilter ? css.backdropFilter !== 'none' : false) || !webkit && (css.filter ? css.filter !== 'none' : false) || ['transform', 'perspective', 'filter'].some(value => (css.willChange || '').includes(value)) || ['paint', 'layout', 'strict', 'content'].some(value => (css.contain || '').includes(value));\n}\nfunction getContainingBlock(element) {\n let currentNode = getParentNode(element);\n while (isHTMLElement(currentNode) && !isLastTraversableNode(currentNode)) {\n if (isContainingBlock(currentNode)) {\n return currentNode;\n } else if (isTopLayer(currentNode)) {\n return null;\n }\n currentNode = getParentNode(currentNode);\n }\n return null;\n}\nfunction isWebKit() {\n if (typeof CSS === 'undefined' || !CSS.supports) return false;\n return CSS.supports('-webkit-backdrop-filter', 'none');\n}\nfunction isLastTraversableNode(node) {\n return ['html', 'body', '#document'].includes(getNodeName(node));\n}\nfunction getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}\nfunction getNodeScroll(element) {\n if (isElement(element)) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n }\n return {\n scrollLeft: element.scrollX,\n scrollTop: element.scrollY\n };\n}\nfunction getParentNode(node) {\n if (getNodeName(node) === 'html') {\n return node;\n }\n const result =\n // Step into the shadow DOM of the parent of a slotted node.\n node.assignedSlot ||\n // DOM Element detected.\n node.parentNode ||\n // ShadowRoot detected.\n isShadowRoot(node) && node.host ||\n // Fallback.\n getDocumentElement(node);\n return isShadowRoot(result) ? result.host : result;\n}\nfunction getNearestOverflowAncestor(node) {\n const parentNode = getParentNode(node);\n if (isLastTraversableNode(parentNode)) {\n return node.ownerDocument ? 
node.ownerDocument.body : node.body;\n }\n if (isHTMLElement(parentNode) && isOverflowElement(parentNode)) {\n return parentNode;\n }\n return getNearestOverflowAncestor(parentNode);\n}\nfunction getOverflowAncestors(node, list, traverseIframes) {\n var _node$ownerDocument2;\n if (list === void 0) {\n list = [];\n }\n if (traverseIframes === void 0) {\n traverseIframes = true;\n }\n const scrollableAncestor = getNearestOverflowAncestor(node);\n const isBody = scrollableAncestor === ((_node$ownerDocument2 = node.ownerDocument) == null ? void 0 : _node$ownerDocument2.body);\n const win = getWindow(scrollableAncestor);\n if (isBody) {\n const frameElement = getFrameElement(win);\n return list.concat(win, win.visualViewport || [], isOverflowElement(scrollableAncestor) ? scrollableAncestor : [], frameElement && traverseIframes ? getOverflowAncestors(frameElement) : []);\n }\n return list.concat(scrollableAncestor, getOverflowAncestors(scrollableAncestor, [], traverseIframes));\n}\nfunction getFrameElement(win) {\n return win.parent && Object.getPrototypeOf(win.parent) ? win.frameElement : null;\n}\n\nexport { getComputedStyle, getContainingBlock, getDocumentElement, getFrameElement, getNearestOverflowAncestor, getNodeName, getNodeScroll, getOverflowAncestors, getParentNode, getWindow, isContainingBlock, isElement, isHTMLElement, isLastTraversableNode, isNode, isOverflowElement, isShadowRoot, isTableElement, isTopLayer, isWebKit };\n", "import { rectToClientRect, detectOverflow as detectOverflow$1, offset as offset$1, autoPlacement as autoPlacement$1, shift as shift$1, flip as flip$1, size as size$1, hide as hide$1, arrow as arrow$1, inline as inline$1, limitShift as limitShift$1, computePosition as computePosition$1 } from '@floating-ui/core';\nimport { round, createCoords, max, min, floor } from '@floating-ui/utils';\nimport { getComputedStyle, isHTMLElement, isElement, getWindow, isWebKit, getFrameElement, getDocumentElement, isTopLayer, getNodeName, isOverflowElement, getNodeScroll, getOverflowAncestors, getParentNode, isLastTraversableNode, isContainingBlock, isTableElement, getContainingBlock } from '@floating-ui/utils/dom';\nexport { getOverflowAncestors } from '@floating-ui/utils/dom';\n\nfunction getCssDimensions(element) {\n const css = getComputedStyle(element);\n // In testing environments, the `width` and `height` properties are empty\n // strings for SVG elements, returning NaN. Fallback to `0` in this case.\n let width = parseFloat(css.width) || 0;\n let height = parseFloat(css.height) || 0;\n const hasOffset = isHTMLElement(element);\n const offsetWidth = hasOffset ? element.offsetWidth : width;\n const offsetHeight = hasOffset ? element.offsetHeight : height;\n const shouldFallback = round(width) !== offsetWidth || round(height) !== offsetHeight;\n if (shouldFallback) {\n width = offsetWidth;\n height = offsetHeight;\n }\n return {\n width,\n height,\n $: shouldFallback\n };\n}\n\nfunction unwrapElement(element) {\n return !isElement(element) ? element.contextElement : element;\n}\n\nfunction getScale(element) {\n const domElement = unwrapElement(element);\n if (!isHTMLElement(domElement)) {\n return createCoords(1);\n }\n const rect = domElement.getBoundingClientRect();\n const {\n width,\n height,\n $\n } = getCssDimensions(domElement);\n let x = ($ ? round(rect.width) : rect.width) / width;\n let y = ($ ? 
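// Usage sketch for `getOverflowAncestors` exported above (illustrative):
// re-run positioning whenever any scroll container of the reference moves,
// which is essentially what `autoUpdate` wires up internally.
import { computePosition, getOverflowAncestors } from '@floating-ui/dom';

function update() {
  computePosition(referenceEl, floatingEl).then(({ x, y }) => {
    Object.assign(floatingEl.style, { left: `${x}px`, top: `${y}px` });
  });
}

for (const ancestor of getOverflowAncestors(referenceEl)) {
  ancestor.addEventListener('scroll', update, { passive: true });
  ancestor.addEventListener('resize', update);
}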
round(rect.height) : rect.height) / height;\n\n // 0, NaN, or Infinity should always fallback to 1.\n\n if (!x || !Number.isFinite(x)) {\n x = 1;\n }\n if (!y || !Number.isFinite(y)) {\n y = 1;\n }\n return {\n x,\n y\n };\n}\n\nconst noOffsets = /*#__PURE__*/createCoords(0);\nfunction getVisualOffsets(element) {\n const win = getWindow(element);\n if (!isWebKit() || !win.visualViewport) {\n return noOffsets;\n }\n return {\n x: win.visualViewport.offsetLeft,\n y: win.visualViewport.offsetTop\n };\n}\nfunction shouldAddVisualOffsets(element, isFixed, floatingOffsetParent) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n if (!floatingOffsetParent || isFixed && floatingOffsetParent !== getWindow(element)) {\n return false;\n }\n return isFixed;\n}\n\nfunction getBoundingClientRect(element, includeScale, isFixedStrategy, offsetParent) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n const clientRect = element.getBoundingClientRect();\n const domElement = unwrapElement(element);\n let scale = createCoords(1);\n if (includeScale) {\n if (offsetParent) {\n if (isElement(offsetParent)) {\n scale = getScale(offsetParent);\n }\n } else {\n scale = getScale(element);\n }\n }\n const visualOffsets = shouldAddVisualOffsets(domElement, isFixedStrategy, offsetParent) ? getVisualOffsets(domElement) : createCoords(0);\n let x = (clientRect.left + visualOffsets.x) / scale.x;\n let y = (clientRect.top + visualOffsets.y) / scale.y;\n let width = clientRect.width / scale.x;\n let height = clientRect.height / scale.y;\n if (domElement) {\n const win = getWindow(domElement);\n const offsetWin = offsetParent && isElement(offsetParent) ? getWindow(offsetParent) : offsetParent;\n let currentWin = win;\n let currentIFrame = getFrameElement(currentWin);\n while (currentIFrame && offsetParent && offsetWin !== currentWin) {\n const iframeScale = getScale(currentIFrame);\n const iframeRect = currentIFrame.getBoundingClientRect();\n const css = getComputedStyle(currentIFrame);\n const left = iframeRect.left + (currentIFrame.clientLeft + parseFloat(css.paddingLeft)) * iframeScale.x;\n const top = iframeRect.top + (currentIFrame.clientTop + parseFloat(css.paddingTop)) * iframeScale.y;\n x *= iframeScale.x;\n y *= iframeScale.y;\n width *= iframeScale.x;\n height *= iframeScale.y;\n x += left;\n y += top;\n currentWin = getWindow(currentIFrame);\n currentIFrame = getFrameElement(currentWin);\n }\n }\n return rectToClientRect({\n width,\n height,\n x,\n y\n });\n}\n\nfunction convertOffsetParentRelativeRectToViewportRelativeRect(_ref) {\n let {\n elements,\n rect,\n offsetParent,\n strategy\n } = _ref;\n const isFixed = strategy === 'fixed';\n const documentElement = getDocumentElement(offsetParent);\n const topLayer = elements ? 
isTopLayer(elements.floating) : false;\n if (offsetParent === documentElement || topLayer && isFixed) {\n return rect;\n }\n let scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n let scale = createCoords(1);\n const offsets = createCoords(0);\n const isOffsetParentAnElement = isHTMLElement(offsetParent);\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || isOverflowElement(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n if (isHTMLElement(offsetParent)) {\n const offsetRect = getBoundingClientRect(offsetParent);\n scale = getScale(offsetParent);\n offsets.x = offsetRect.x + offsetParent.clientLeft;\n offsets.y = offsetRect.y + offsetParent.clientTop;\n }\n }\n return {\n width: rect.width * scale.x,\n height: rect.height * scale.y,\n x: rect.x * scale.x - scroll.scrollLeft * scale.x + offsets.x,\n y: rect.y * scale.y - scroll.scrollTop * scale.y + offsets.y\n };\n}\n\nfunction getClientRects(element) {\n return Array.from(element.getClientRects());\n}\n\nfunction getWindowScrollBarX(element) {\n // If <html> has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n return getBoundingClientRect(getDocumentElement(element)).left + getNodeScroll(element).scrollLeft;\n}\n\n// Gets the entire size of the scrollable document area, even extending outside\n// of the `<html>` and `<body>` rect bounds if horizontally scrollable.\nfunction getDocumentRect(element) {\n const html = getDocumentElement(element);\n const scroll = getNodeScroll(element);\n const body = element.ownerDocument.body;\n const width = max(html.scrollWidth, html.clientWidth, body.scrollWidth, body.clientWidth);\n const height = max(html.scrollHeight, html.clientHeight, body.scrollHeight, body.clientHeight);\n let x = -scroll.scrollLeft + getWindowScrollBarX(element);\n const y = -scroll.scrollTop;\n if (getComputedStyle(body).direction === 'rtl') {\n x += max(html.clientWidth, body.clientWidth) - width;\n }\n return {\n width,\n height,\n x,\n y\n };\n}\n\nfunction getViewportRect(element, strategy) {\n const win = getWindow(element);\n const html = getDocumentElement(element);\n const visualViewport = win.visualViewport;\n let width = html.clientWidth;\n let height = html.clientHeight;\n let x = 0;\n let y = 0;\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n const visualViewportBased = isWebKit();\n if (!visualViewportBased || visualViewportBased && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n return {\n width,\n height,\n x,\n y\n };\n}\n\n// Returns the inner client rect, subtracting scrollbars if present.\nfunction getInnerBoundingClientRect(element, strategy) {\n const clientRect = getBoundingClientRect(element, true, strategy === 'fixed');\n const top = clientRect.top + element.clientTop;\n const left = clientRect.left + element.clientLeft;\n const scale = isHTMLElement(element) ? 
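// Illustrative sketch: the 'viewport'/'document' root boundaries consumed by
// `detectOverflow` resolve to getViewportRect/getDocumentRect above. A
// custom middleware (the hypothetical `logOverflow` below) can inspect the
// per-side overflow they produce.
import { computePosition, detectOverflow } from '@floating-ui/dom';

const logOverflow = {
  name: 'logOverflow',
  async fn(state) {
    const overflow = await detectOverflow(state, {
      rootBoundary: 'document', // measure against the whole document rect
      padding: 8,
    });
    console.log(overflow); // positive values = pixels of overflow per side
    return {};
  },
};

computePosition(referenceEl, floatingEl, { middleware: [logOverflow] });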
getScale(element) : createCoords(1);\n const width = element.clientWidth * scale.x;\n const height = element.clientHeight * scale.y;\n const x = left * scale.x;\n const y = top * scale.y;\n return {\n width,\n height,\n x,\n y\n };\n}\nfunction getClientRectFromClippingAncestor(element, clippingAncestor, strategy) {\n let rect;\n if (clippingAncestor === 'viewport') {\n rect = getViewportRect(element, strategy);\n } else if (clippingAncestor === 'document') {\n rect = getDocumentRect(getDocumentElement(element));\n } else if (isElement(clippingAncestor)) {\n rect = getInnerBoundingClientRect(clippingAncestor, strategy);\n } else {\n const visualOffsets = getVisualOffsets(element);\n rect = {\n ...clippingAncestor,\n x: clippingAncestor.x - visualOffsets.x,\n y: clippingAncestor.y - visualOffsets.y\n };\n }\n return rectToClientRect(rect);\n}\nfunction hasFixedPositionAncestor(element, stopNode) {\n const parentNode = getParentNode(element);\n if (parentNode === stopNode || !isElement(parentNode) || isLastTraversableNode(parentNode)) {\n return false;\n }\n return getComputedStyle(parentNode).position === 'fixed' || hasFixedPositionAncestor(parentNode, stopNode);\n}\n\n// A \"clipping ancestor\" is an `overflow` element with the characteristic of\n// clipping (or hiding) child elements. This returns all clipping ancestors\n// of the given element up the tree.\nfunction getClippingElementAncestors(element, cache) {\n const cachedResult = cache.get(element);\n if (cachedResult) {\n return cachedResult;\n }\n let result = getOverflowAncestors(element, [], false).filter(el => isElement(el) && getNodeName(el) !== 'body');\n let currentContainingBlockComputedStyle = null;\n const elementIsFixed = getComputedStyle(element).position === 'fixed';\n let currentNode = elementIsFixed ? getParentNode(element) : element;\n\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n while (isElement(currentNode) && !isLastTraversableNode(currentNode)) {\n const computedStyle = getComputedStyle(currentNode);\n const currentNodeIsContaining = isContainingBlock(currentNode);\n if (!currentNodeIsContaining && computedStyle.position === 'fixed') {\n currentContainingBlockComputedStyle = null;\n }\n const shouldDropCurrentNode = elementIsFixed ? !currentNodeIsContaining && !currentContainingBlockComputedStyle : !currentNodeIsContaining && computedStyle.position === 'static' && !!currentContainingBlockComputedStyle && ['absolute', 'fixed'].includes(currentContainingBlockComputedStyle.position) || isOverflowElement(currentNode) && !currentNodeIsContaining && hasFixedPositionAncestor(element, currentNode);\n if (shouldDropCurrentNode) {\n // Drop non-containing blocks.\n result = result.filter(ancestor => ancestor !== currentNode);\n } else {\n // Record last containing block for next iteration.\n currentContainingBlockComputedStyle = computedStyle;\n }\n currentNode = getParentNode(currentNode);\n }\n cache.set(element, result);\n return result;\n}\n\n// Gets the maximum area that the element is visible in due to any number of\n// clipping ancestors.\nfunction getClippingRect(_ref) {\n let {\n element,\n boundary,\n rootBoundary,\n strategy\n } = _ref;\n const elementClippingAncestors = boundary === 'clippingAncestors' ? isTopLayer(element) ? 
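// Illustrative sketch: `boundary: 'clippingAncestors'` (the default) uses
// the ancestor walk above, while passing an element scopes overflow checks
// to that element. The `#scroll-area` selector is an assumption.
import { computePosition, flip } from '@floating-ui/dom';

const scrollArea = document.querySelector('#scroll-area');
computePosition(referenceEl, floatingEl, {
  middleware: [flip({ boundary: scrollArea })],
});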
[] : getClippingElementAncestors(element, this._c) : [].concat(boundary);\n const clippingAncestors = [...elementClippingAncestors, rootBoundary];\n const firstClippingAncestor = clippingAncestors[0];\n const clippingRect = clippingAncestors.reduce((accRect, clippingAncestor) => {\n const rect = getClientRectFromClippingAncestor(element, clippingAncestor, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromClippingAncestor(element, firstClippingAncestor, strategy));\n return {\n width: clippingRect.right - clippingRect.left,\n height: clippingRect.bottom - clippingRect.top,\n x: clippingRect.left,\n y: clippingRect.top\n };\n}\n\nfunction getDimensions(element) {\n const {\n width,\n height\n } = getCssDimensions(element);\n return {\n width,\n height\n };\n}\n\nfunction getRectRelativeToOffsetParent(element, offsetParent, strategy) {\n const isOffsetParentAnElement = isHTMLElement(offsetParent);\n const documentElement = getDocumentElement(offsetParent);\n const isFixed = strategy === 'fixed';\n const rect = getBoundingClientRect(element, true, isFixed, offsetParent);\n let scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n const offsets = createCoords(0);\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || isOverflowElement(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n if (isOffsetParentAnElement) {\n const offsetRect = getBoundingClientRect(offsetParent, true, isFixed, offsetParent);\n offsets.x = offsetRect.x + offsetParent.clientLeft;\n offsets.y = offsetRect.y + offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n const x = rect.left + scroll.scrollLeft - offsets.x;\n const y = rect.top + scroll.scrollTop - offsets.y;\n return {\n x,\n y,\n width: rect.width,\n height: rect.height\n };\n}\n\nfunction isStaticPositioned(element) {\n return getComputedStyle(element).position === 'static';\n}\n\nfunction getTrueOffsetParent(element, polyfill) {\n if (!isHTMLElement(element) || getComputedStyle(element).position === 'fixed') {\n return null;\n }\n if (polyfill) {\n return polyfill(element);\n }\n return element.offsetParent;\n}\n\n// Gets the closest ancestor positioned element. 
Handles some edge cases,\n// such as table ancestors and cross browser bugs.\nfunction getOffsetParent(element, polyfill) {\n const win = getWindow(element);\n if (isTopLayer(element)) {\n return win;\n }\n if (!isHTMLElement(element)) {\n let svgOffsetParent = getParentNode(element);\n while (svgOffsetParent && !isLastTraversableNode(svgOffsetParent)) {\n if (isElement(svgOffsetParent) && !isStaticPositioned(svgOffsetParent)) {\n return svgOffsetParent;\n }\n svgOffsetParent = getParentNode(svgOffsetParent);\n }\n return win;\n }\n let offsetParent = getTrueOffsetParent(element, polyfill);\n while (offsetParent && isTableElement(offsetParent) && isStaticPositioned(offsetParent)) {\n offsetParent = getTrueOffsetParent(offsetParent, polyfill);\n }\n if (offsetParent && isLastTraversableNode(offsetParent) && isStaticPositioned(offsetParent) && !isContainingBlock(offsetParent)) {\n return win;\n }\n return offsetParent || getContainingBlock(element) || win;\n}\n\nconst getElementRects = async function (data) {\n const getOffsetParentFn = this.getOffsetParent || getOffsetParent;\n const getDimensionsFn = this.getDimensions;\n const floatingDimensions = await getDimensionsFn(data.floating);\n return {\n reference: getRectRelativeToOffsetParent(data.reference, await getOffsetParentFn(data.floating), data.strategy),\n floating: {\n x: 0,\n y: 0,\n width: floatingDimensions.width,\n height: floatingDimensions.height\n }\n };\n};\n\nfunction isRTL(element) {\n return getComputedStyle(element).direction === 'rtl';\n}\n\nconst platform = {\n convertOffsetParentRelativeRectToViewportRelativeRect,\n getDocumentElement,\n getClippingRect,\n getOffsetParent,\n getElementRects,\n getClientRects,\n getDimensions,\n getScale,\n isElement,\n isRTL\n};\n\n// https://samthor.au/2021/observing-dom/\nfunction observeMove(element, onMove) {\n let io = null;\n let timeoutId;\n const root = getDocumentElement(element);\n function cleanup() {\n var _io;\n clearTimeout(timeoutId);\n (_io = io) == null || _io.disconnect();\n io = null;\n }\n function refresh(skip, threshold) {\n if (skip === void 0) {\n skip = false;\n }\n if (threshold === void 0) {\n threshold = 1;\n }\n cleanup();\n const {\n left,\n top,\n width,\n height\n } = element.getBoundingClientRect();\n if (!skip) {\n onMove();\n }\n if (!width || !height) {\n return;\n }\n const insetTop = floor(top);\n const insetRight = floor(root.clientWidth - (left + width));\n const insetBottom = floor(root.clientHeight - (top + height));\n const insetLeft = floor(left);\n const rootMargin = -insetTop + \"px \" + -insetRight + \"px \" + -insetBottom + \"px \" + -insetLeft + \"px\";\n const options = {\n rootMargin,\n threshold: max(0, min(1, threshold)) || 1\n };\n let isFirstUpdate = true;\n function handleObserve(entries) {\n const ratio = entries[0].intersectionRatio;\n if (ratio !== threshold) {\n if (!isFirstUpdate) {\n return refresh();\n }\n if (!ratio) {\n // If the reference is clipped, the ratio is 0. Throttle the refresh\n // to prevent an infinite loop of updates.\n timeoutId = setTimeout(() => {\n refresh(false, 1e-7);\n }, 1000);\n } else {\n refresh(false, ratio);\n }\n }\n isFirstUpdate = false;\n }\n\n // Older browsers don't support a `document` as the root and will throw an\n // error.\n try {\n io = new IntersectionObserver(handleObserve, {\n ...options,\n // Handle