Home Reference Source

src/controller/timeline-controller.ts

  1. import { Events } from '../events';
  2. import Cea608Parser, { CaptionScreen } from '../utils/cea-608-parser';
  3. import OutputFilter from '../utils/output-filter';
  4. import { parseWebVTT } from '../utils/webvtt-parser';
  5. import {
  6. sendAddTrackEvent,
  7. clearCurrentCues,
  8. addCueToTrack,
  9. removeCuesInRange,
  10. } from '../utils/texttrack-utils';
  11. import { parseIMSC1, IMSC1_CODEC } from '../utils/imsc1-ttml-parser';
  12. import { PlaylistLevelType } from '../types/loader';
  13. import { Fragment } from '../loader/fragment';
  14. import {
  15. FragParsingUserdataData,
  16. FragLoadedData,
  17. FragDecryptedData,
  18. MediaAttachingData,
  19. ManifestLoadedData,
  20. InitPTSFoundData,
  21. SubtitleTracksUpdatedData,
  22. BufferFlushingData,
  23. FragLoadingData,
  24. } from '../types/events';
  25. import { logger } from '../utils/logger';
  26. import type Hls from '../hls';
  27. import type { ComponentAPI } from '../types/component-api';
  28. import type { HlsConfig } from '../config';
  29. import type { CuesInterface } from '../utils/cues';
  30. import type { MediaPlaylist } from '../types/media-playlist';
  31. import type { VTTCCs } from '../types/vtt';
  32.  
/**
 * Display metadata for one of the four CEA-608/708 captions text tracks.
 * Label/language default to config values and are overridden from the
 * manifest's CLOSED-CAPTIONS media in `onManifestLoaded`.
 */
type TrackProperties = {
  label: string;
  languageCode: string;
  // Manifest CLOSED-CAPTIONS entry backing this track, when one exists.
  media?: MediaPlaylist;
};
  38.  
/**
 * Track descriptor emitted with NON_NATIVE_TEXT_TRACKS_FOUND when
 * `renderTextTracksNatively` is disabled, so an external renderer can
 * create its own track UI.
 */
type NonNativeCaptionsTrack = {
  _id?: string;
  label: string;
  kind: string;
  default: boolean;
  // Exactly one of these is set, depending on whether the source is a
  // CLOSED-CAPTIONS entry or a SUBTITLES playlist.
  closedCaptions?: MediaPlaylist;
  subtitleTrack?: MediaPlaylist;
};
  47.  
/**
 * TimelineController manages timed-text for a media element:
 * - creates (or reuses) native TextTracks for CEA-608/708 captions and
 *   WebVTT/IMSC1 subtitles, or emits non-native track/cue events when
 *   `renderTextTracksNatively` is disabled;
 * - feeds CEA-608 byte pairs from FRAG_PARSING_USERDATA into two
 *   Cea608Parser instances (channels 1/2 and 3/4);
 * - parses subtitle fragment payloads into cues once an initial PTS for the
 *   fragment's discontinuity counter (cc) is known, buffering fragments that
 *   arrive earlier;
 * - removes cues from tracks when the back buffer is flushed.
 */
export class TimelineController implements ComponentAPI {
  private hls: Hls;
  private media: HTMLMediaElement | null = null;
  private config: HlsConfig;
  private enabled: boolean = true;
  // Cue factory/renderer supplied via config.cueHandler.
  private Cues: CuesInterface;
  // Native subtitle TextTracks, indexed by subtitle track index (frag.level).
  private textTracks: Array<TextTrack> = [];
  // Subtitle MediaPlaylists from SUBTITLE_TRACKS_UPDATED, same indexing.
  private tracks: Array<MediaPlaylist> = [];
  // Initial PTS and timescale per discontinuity counter (cc), used to
  // synchronise WebVTT/IMSC1 cue times with the main timeline.
  private initPTS: Array<number> = [];
  private timescale: Array<number> = [];
  // Subtitle fragments received before their cc's initial PTS arrived;
  // replayed from onInitPtsFound.
  private unparsedVttFrags: Array<FragLoadedData | FragDecryptedData> = [];
  // 608/708 captions TextTracks keyed by 'textTrack1'..'textTrack4'.
  private captionsTracks: Record<string, TextTrack> = {};
  private nonNativeCaptionsTracks: Record<string, NonNativeCaptionsTrack> = {};
  private cea608Parser1!: Cea608Parser;
  private cea608Parser2!: Cea608Parser;
  // Last main-playlist fragment sn / part index, used to detect
  // non-contiguous fragment loads and reset the 608 parsers.
  private lastSn: number = -1;
  private lastPartIndex: number = -1;
  // Last discontinuity counter seen while parsing subtitle fragments.
  private prevCC: number = -1;
  private vttCCs: VTTCCs = newVTTCCs();
  // Per-track label/language/media properties for the four 608/708 tracks.
  private captionsProperties: {
    textTrack1: TrackProperties;
    textTrack2: TrackProperties;
    textTrack3: TrackProperties;
    textTrack4: TrackProperties;
  };

  constructor(hls: Hls) {
    this.hls = hls;
    this.config = hls.config;
    this.Cues = hls.config.cueHandler;

    // Seed track properties from config; onManifestLoaded may override them
    // with values from the manifest's CLOSED-CAPTIONS media.
    this.captionsProperties = {
      textTrack1: {
        label: this.config.captionsTextTrack1Label,
        languageCode: this.config.captionsTextTrack1LanguageCode,
      },
      textTrack2: {
        label: this.config.captionsTextTrack2Label,
        languageCode: this.config.captionsTextTrack2LanguageCode,
      },
      textTrack3: {
        label: this.config.captionsTextTrack3Label,
        languageCode: this.config.captionsTextTrack3LanguageCode,
      },
      textTrack4: {
        label: this.config.captionsTextTrack4Label,
        languageCode: this.config.captionsTextTrack4LanguageCode,
      },
    };

    if (this.config.enableCEA708Captions) {
      // Parser 1 handles field 1 (channels 1/2), parser 2 field 2 (3/4).
      const channel1 = new OutputFilter(this, 'textTrack1');
      const channel2 = new OutputFilter(this, 'textTrack2');
      const channel3 = new OutputFilter(this, 'textTrack3');
      const channel4 = new OutputFilter(this, 'textTrack4');
      this.cea608Parser1 = new Cea608Parser(1, channel1, channel2);
      this.cea608Parser2 = new Cea608Parser(3, channel3, channel4);
    }

    hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.on(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.on(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.on(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.on(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.on(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.on(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
  }

  /** Detaches all event handlers and releases references for GC. */
  public destroy(): void {
    const { hls } = this;
    hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.off(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.off(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.off(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.off(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.off(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.off(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
    // @ts-ignore
    this.hls = this.config = this.cea608Parser1 = this.cea608Parser2 = null;
  }

  /**
   * Adds a captions cue for a parsed 608 screen, de-duplicating against
   * previously covered time ranges. Called by the OutputFilter channels.
   *
   * @param trackName - 'textTrack1'..'textTrack4'
   * @param cueRanges - mutable list of [start, end] ranges already cued;
   *                    overlapping ranges are merged in place
   */
  public addCues(
    trackName: string,
    startTime: number,
    endTime: number,
    screen: CaptionScreen,
    cueRanges: Array<[number, number]>
  ) {
    // skip cues which overlap more than 50% with previously parsed time ranges
    let merged = false;
    for (let i = cueRanges.length; i--; ) {
      const cueRange = cueRanges[i];
      const overlap = intersection(
        cueRange[0],
        cueRange[1],
        startTime,
        endTime
      );
      if (overlap >= 0) {
        cueRange[0] = Math.min(cueRange[0], startTime);
        cueRange[1] = Math.max(cueRange[1], endTime);
        merged = true;
        if (overlap / (endTime - startTime) > 0.5) {
          return;
        }
      }
    }
    if (!merged) {
      cueRanges.push([startTime, endTime]);
    }

    if (this.config.renderTextTracksNatively) {
      const track = this.captionsTracks[trackName];
      this.Cues.newCue(track, startTime, endTime, screen);
    } else {
      // Non-native mode: hand the cues to the app via CUES_PARSED.
      const cues = this.Cues.newCue(null, startTime, endTime, screen);
      this.hls.trigger(Events.CUES_PARSED, {
        type: 'captions',
        cues,
        track: trackName,
      });
    }
  }

  // Triggered when an initial PTS is found; used for synchronisation of WebVTT.
  private onInitPtsFound(
    event: Events.INIT_PTS_FOUND,
    { frag, id, initPTS, timescale }: InitPTSFoundData
  ) {
    const { unparsedVttFrags } = this;
    if (id === 'main') {
      this.initPTS[frag.cc] = initPTS;
      this.timescale[frag.cc] = timescale;
    }

    // Due to asynchronous processing, initial PTS may arrive later than the first VTT fragments are loaded.
    // Parse any unparsed fragments upon receiving the initial PTS.
    if (unparsedVttFrags.length) {
      this.unparsedVttFrags = [];
      unparsedVttFrags.forEach((frag) => {
        this.onFragLoaded(Events.FRAG_LOADED, frag as FragLoadedData);
      });
    }
  }

  /**
   * Returns the media's TextTrack previously tagged with the given
   * hls.js marker property (see createNativeTrack), or null.
   */
  private getExistingTrack(trackName: string): TextTrack | null {
    const { media } = this;
    if (media) {
      for (let i = 0; i < media.textTracks.length; i++) {
        const textTrack = media.textTracks[i];
        if (textTrack[trackName]) {
          return textTrack;
        }
      }
    }
    return null;
  }

  /** Creates a captions track natively or as a non-native track event. */
  public createCaptionsTrack(trackName: string) {
    if (this.config.renderTextTracksNatively) {
      this.createNativeTrack(trackName);
    } else {
      this.createNonNativeTrack(trackName);
    }
  }

  private createNativeTrack(trackName: string) {
    if (this.captionsTracks[trackName]) {
      return;
    }
    const { captionsProperties, captionsTracks, media } = this;
    const { label, languageCode } = captionsProperties[trackName];
    // Enable reuse of existing text track.
    const existingTrack = this.getExistingTrack(trackName);
    if (!existingTrack) {
      const textTrack = this.createTextTrack('captions', label, languageCode);
      if (textTrack) {
        // Set a special property on the track so we know it's managed by Hls.js
        textTrack[trackName] = true;
        captionsTracks[trackName] = textTrack;
      }
    } else {
      captionsTracks[trackName] = existingTrack;
      clearCurrentCues(captionsTracks[trackName]);
      sendAddTrackEvent(captionsTracks[trackName], media as HTMLMediaElement);
    }
  }

  private createNonNativeTrack(trackName: string) {
    if (this.nonNativeCaptionsTracks[trackName]) {
      return;
    }
    // Create a list of a single track for the provider to consume
    const trackProperties: TrackProperties = this.captionsProperties[trackName];
    if (!trackProperties) {
      return;
    }
    const label = trackProperties.label as string;
    const track = {
      _id: trackName,
      label,
      kind: 'captions',
      default: trackProperties.media ? !!trackProperties.media.default : false,
      closedCaptions: trackProperties.media,
    };
    this.nonNativeCaptionsTracks[trackName] = track;
    this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: [track] });
  }

  /** Thin wrapper over media.addTextTrack; undefined when no media attached. */
  private createTextTrack(
    kind: TextTrackKind,
    label: string,
    lang?: string
  ): TextTrack | undefined {
    const media = this.media;
    if (!media) {
      return;
    }
    return media.addTextTrack(kind, label, lang);
  }

  private onMediaAttaching(
    event: Events.MEDIA_ATTACHING,
    data: MediaAttachingData
  ) {
    this.media = data.media;
    this._cleanTracks();
  }

  private onMediaDetaching() {
    // Drop all captions tracks; their cues belong to the detached element.
    const { captionsTracks } = this;
    Object.keys(captionsTracks).forEach((trackName) => {
      clearCurrentCues(captionsTracks[trackName]);
      delete captionsTracks[trackName];
    });
    this.nonNativeCaptionsTracks = {};
  }

  /** Resets all per-stream state ahead of a new manifest load. */
  private onManifestLoading() {
    this.lastSn = -1; // Detect discontinuity in fragment parsing
    this.lastPartIndex = -1;
    this.prevCC = -1;
    this.vttCCs = newVTTCCs(); // Detect discontinuity in subtitle manifests
    this._cleanTracks();
    this.tracks = [];
    this.captionsTracks = {};
    this.nonNativeCaptionsTracks = {};
    this.textTracks = [];
    this.unparsedVttFrags = this.unparsedVttFrags || [];
    this.initPTS = [];
    this.timescale = [];
    if (this.cea608Parser1 && this.cea608Parser2) {
      this.cea608Parser1.reset();
      this.cea608Parser2.reset();
    }
  }

  private _cleanTracks() {
    // clear outdated subtitles
    const { media } = this;
    if (!media) {
      return;
    }
    const textTracks = media.textTracks;
    if (textTracks) {
      for (let i = 0; i < textTracks.length; i++) {
        clearCurrentCues(textTracks[i]);
      }
    }
  }

  /**
   * Builds (or reuses) one TextTrack per subtitle playlist, or announces the
   * track list via NON_NATIVE_TEXT_TRACKS_FOUND in non-native mode.
   */
  private onSubtitleTracksUpdated(
    event: Events.SUBTITLE_TRACKS_UPDATED,
    data: SubtitleTracksUpdatedData
  ) {
    this.textTracks = [];
    const tracks: Array<MediaPlaylist> = data.subtitleTracks || [];
    const hasIMSC1 = tracks.some((track) => track.textCodec === IMSC1_CODEC);
    if (this.config.enableWebVTT || (hasIMSC1 && this.config.enableIMSC1)) {
      // NOTE(review): compared before this.tracks is reassigned below, so
      // this only checks length against the previous track list.
      const sameTracks =
        this.tracks && tracks && this.tracks.length === tracks.length;
      this.tracks = tracks || [];

      if (this.config.renderTextTracksNatively) {
        const inUseTracks = this.media ? this.media.textTracks : [];

        this.tracks.forEach((track, index) => {
          let textTrack: TextTrack | undefined;
          if (index < inUseTracks.length) {
            let inUseTrack: TextTrack | null = null;

            for (let i = 0; i < inUseTracks.length; i++) {
              if (canReuseVttTextTrack(inUseTracks[i], track)) {
                inUseTrack = inUseTracks[i];
                break;
              }
            }

            // Reuse tracks with the same label, but do not reuse 608/708 tracks
            if (inUseTrack) {
              textTrack = inUseTrack;
            }
          }
          if (textTrack) {
            clearCurrentCues(textTrack);
          } else {
            textTrack = this.createTextTrack(
              'subtitles',
              track.name,
              track.lang
            );
            if (textTrack) {
              textTrack.mode = 'disabled';
            }
          }
          if (textTrack) {
            (textTrack as any).groupId = track.groupId;
            this.textTracks.push(textTrack);
          }
        });
      } else if (!sameTracks && this.tracks && this.tracks.length) {
        // Create a list of tracks for the provider to consume
        const tracksList = this.tracks.map((track) => {
          return {
            label: track.name,
            kind: track.type.toLowerCase(),
            default: track.default,
            subtitleTrack: track,
          };
        });
        this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, {
          tracks: tracksList,
        });
      }
    }
  }

  /**
   * Copies label/language/media from the manifest's CLOSED-CAPTIONS entries
   * (INSTREAM-ID "CC1".."CC4" / "SERVICE1".."SERVICE4") into
   * captionsProperties.
   */
  private onManifestLoaded(
    event: Events.MANIFEST_LOADED,
    data: ManifestLoadedData
  ) {
    if (this.config.enableCEA708Captions && data.captions) {
      data.captions.forEach((captionsTrack) => {
        const instreamIdMatch = /(?:CC|SERVICE)([1-4])/.exec(
          captionsTrack.instreamId as string
        );
        if (!instreamIdMatch) {
          return;
        }
        const trackName = `textTrack${instreamIdMatch[1]}`;
        const trackProperties: TrackProperties =
          this.captionsProperties[trackName];
        if (!trackProperties) {
          return;
        }
        trackProperties.label = captionsTrack.name;
        if (captionsTrack.lang) {
          // optional attribute
          trackProperties.languageCode = captionsTrack.lang;
        }
        trackProperties.media = captionsTrack;
      });
    }
  }

  private onFragLoading(event: Events.FRAG_LOADING, data: FragLoadingData) {
    const { cea608Parser1, cea608Parser2, lastSn, lastPartIndex } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }
    // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
    if (data.frag.type === PlaylistLevelType.MAIN) {
      const sn = data.frag.sn;
      const partIndex = data?.part?.index ?? -1;
      if (
        !(
          sn === lastSn + 1 ||
          (sn === lastSn && partIndex === lastPartIndex + 1)
        )
      ) {
        cea608Parser1.reset();
        cea608Parser2.reset();
      }
      this.lastSn = sn as number;
      this.lastPartIndex = partIndex;
    }
  }

  /**
   * Parses loaded subtitle fragments (WebVTT or IMSC1). Fragments whose cc
   * has no initial PTS yet are queued in unparsedVttFrags; encrypted
   * fragments are deferred until FRAG_DECRYPTED re-enters here.
   */
  private onFragLoaded(
    event: Events.FRAG_LOADED,
    data: FragDecryptedData | FragLoadedData
  ) {
    const { frag, payload } = data;
    const { initPTS, unparsedVttFrags } = this;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      // If fragment is subtitle type, parse as WebVTT.
      if (payload.byteLength) {
        // We need an initial synchronisation PTS. Store fragments as long as none has arrived.
        if (!Number.isFinite(initPTS[frag.cc])) {
          unparsedVttFrags.push(data);
          if (initPTS.length) {
            // finish unsuccessfully, otherwise the subtitle-stream-controller could be blocked from loading new frags.
            this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
              success: false,
              frag,
              error: new Error('Missing initial subtitle PTS'),
            });
          }
          return;
        }

        const decryptData = frag.decryptdata;
        // fragment after decryption has a stats object
        const decrypted = 'stats' in data;
        // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
        if (
          decryptData == null ||
          decryptData.key == null ||
          decryptData.method !== 'AES-128' ||
          decrypted
        ) {
          const trackPlaylistMedia = this.tracks[frag.level];
          const vttCCs = this.vttCCs;
          if (!vttCCs[frag.cc]) {
            // First fragment of a new discontinuity: record its start and
            // the previous cc for offset computation by the parser.
            vttCCs[frag.cc] = {
              start: frag.start,
              prevCC: this.prevCC,
              new: true,
            };
            this.prevCC = frag.cc;
          }
          if (
            trackPlaylistMedia &&
            trackPlaylistMedia.textCodec === IMSC1_CODEC
          ) {
            this._parseIMSC1(frag, payload);
          } else {
            this._parseVTTs(frag, payload, vttCCs);
          }
        }
      } else {
        // In case there is no payload, finish unsuccessfully.
        this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error: new Error('Empty subtitle payload'),
        });
      }
    }
  }

  private _parseIMSC1(frag: Fragment, payload: ArrayBuffer) {
    const hls = this.hls;
    parseIMSC1(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        logger.log(`Failed to parse IMSC1: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _parseVTTs(frag: Fragment, payload: ArrayBuffer, vttCCs: any) {
    const hls = this.hls;
    // Parse the WebVTT file contents.
    parseWebVTT(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      vttCCs,
      frag.cc,
      frag.start,
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        this._fallbackToIMSC1(frag, payload);
        // Something went wrong while parsing. Trigger event with success false.
        logger.log(`Failed to parse VTT cue: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _fallbackToIMSC1(frag: Fragment, payload: ArrayBuffer) {
    // If textCodec is unknown, try parsing as IMSC1. Set textCodec based on the result
    const trackPlaylistMedia = this.tracks[frag.level];
    if (!trackPlaylistMedia.textCodec) {
      parseIMSC1(
        payload,
        this.initPTS[frag.cc],
        this.timescale[frag.cc],
        () => {
          trackPlaylistMedia.textCodec = IMSC1_CODEC;
          this._parseIMSC1(frag, payload);
        },
        () => {
          trackPlaylistMedia.textCodec = 'wvtt';
        }
      );
    }
  }

  /** Appends parsed cues either to a native TextTrack or via CUES_PARSED. */
  private _appendCues(cues: VTTCue[], fragLevel: number) {
    const hls = this.hls;
    if (this.config.renderTextTracksNatively) {
      const textTrack = this.textTracks[fragLevel];
      // WebVTTParser.parse is an async method and if the currently selected text track mode is set to "disabled"
      // before parsing is done then don't try to access currentTrack.cues.getCueById as cues will be null
      // and trying to access getCueById method of cues will throw an exception
      // Because we check if the mode is disabled, we can force check `cues` below. They can't be null.
      if (!textTrack || textTrack.mode === 'disabled') {
        return;
      }
      cues.forEach((cue) => addCueToTrack(textTrack, cue));
    } else {
      const currentTrack = this.tracks[fragLevel];
      if (!currentTrack) {
        return;
      }
      const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
      hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
    }
  }

  private onFragDecrypted(
    event: Events.FRAG_DECRYPTED,
    data: FragDecryptedData
  ) {
    const { frag } = data;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      if (!Number.isFinite(this.initPTS[frag.cc])) {
        this.unparsedVttFrags.push(data as unknown as FragLoadedData);
        return;
      }
      this.onFragLoaded(Events.FRAG_LOADED, data as unknown as FragLoadedData);
    }
  }

  private onSubtitleTracksCleared() {
    this.tracks = [];
    this.captionsTracks = {};
  }

  /**
   * Feeds CEA-608 byte pairs extracted from SEI user data into the two
   * field parsers, keyed by each sample's PTS.
   */
  private onFragParsingUserdata(
    event: Events.FRAG_PARSING_USERDATA,
    data: FragParsingUserdataData
  ) {
    const { cea608Parser1, cea608Parser2 } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }

    // If the event contains captions (found in the bytes property), push all bytes into the parser immediately
    // It will create the proper timestamps based on the PTS value
    for (let i = 0; i < data.samples.length; i++) {
      const ccBytes = data.samples[i].bytes;
      if (ccBytes) {
        const ccdatas = this.extractCea608Data(ccBytes);
        cea608Parser1.addData(data.samples[i].pts, ccdatas[0]);
        cea608Parser2.addData(data.samples[i].pts, ccdatas[1]);
      }
    }
  }

  /**
   * Removes cues that fall inside a flushed back-buffer range, for both
   * 608 captions tracks and (natively rendered) subtitle tracks.
   */
  onBufferFlushing(
    event: Events.BUFFER_FLUSHING,
    { startOffset, endOffset, endOffsetSubtitles, type }: BufferFlushingData
  ) {
    const { media } = this;
    if (!media || media.currentTime < endOffset) {
      return;
    }
    // Clear 608 caption cues from the captions TextTracks when the video back buffer is flushed
    // Forward cues are never removed because we can loose streamed 608 content from recent fragments
    if (!type || type === 'video') {
      const { captionsTracks } = this;
      Object.keys(captionsTracks).forEach((trackName) =>
        removeCuesInRange(captionsTracks[trackName], startOffset, endOffset)
      );
    }
    if (this.config.renderTextTracksNatively) {
      // Clear VTT/IMSC1 subtitle cues from the subtitle TextTracks when the back buffer is flushed
      if (startOffset === 0 && endOffsetSubtitles !== undefined) {
        const { textTracks } = this;
        Object.keys(textTracks).forEach((trackName) =>
          removeCuesInRange(
            textTracks[trackName],
            startOffset,
            endOffsetSubtitles
          )
        );
      }
    }
  }

  /**
   * Splits a cc_data byte array (as carried in SEI user data) into the
   * CEA-608 field-1 and field-2 byte streams, dropping invalid pairs,
   * null (0x00/0x00) pairs, and CEA-708 packet data.
   *
   * @returns [field1Bytes, field2Bytes]
   */
  private extractCea608Data(byteArray: Uint8Array): number[][] {
    const actualCCBytes: number[][] = [[], []];
    // Low 5 bits of the first byte give the cc_data pair count.
    const count = byteArray[0] & 0x1f;
    let position = 2;

    for (let j = 0; j < count; j++) {
      const tmpByte = byteArray[position++];
      const ccbyte1 = 0x7f & byteArray[position++];
      const ccbyte2 = 0x7f & byteArray[position++];
      if (ccbyte1 === 0 && ccbyte2 === 0) {
        continue;
      }
      const ccValid = (0x04 & tmpByte) !== 0; // Support all four channels
      if (ccValid) {
        const ccType = 0x03 & tmpByte;
        if (
          0x00 /* CEA608 field1*/ === ccType ||
          0x01 /* CEA608 field2*/ === ccType
        ) {
          // Exclude CEA708 CC data.
          actualCCBytes[ccType].push(ccbyte1);
          actualCCBytes[ccType].push(ccbyte2);
        }
      }
    }
    return actualCCBytes;
  }
}
  703.  
  704. function canReuseVttTextTrack(inUseTrack, manifestTrack): boolean {
  705. return (
  706. inUseTrack &&
  707. inUseTrack.label === manifestTrack.name &&
  708. !(inUseTrack.textTrack1 || inUseTrack.textTrack2)
  709. );
  710. }
  711.  
  712. function intersection(x1: number, x2: number, y1: number, y2: number): number {
  713. return Math.min(x2, y2) - Math.max(x1, y1);
  714. }
  715.  
  716. function newVTTCCs(): VTTCCs {
  717. return {
  718. ccOffset: 0,
  719. presentationOffset: 0,
  720. 0: {
  721. start: 0,
  722. prevCC: -1,
  723. new: false,
  724. },
  725. };
  726. }