Source code

Revision control

Other Tools

1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3
/* This Source Code Form is subject to the terms of the Mozilla Public
4
* License, v. 2.0. If a copy of the MPL was not distributed with this
5
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#include "MediaData.h"
8
9
#include "ImageContainer.h"
10
#include "MediaInfo.h"
11
#include "VideoUtils.h"
12
#include "YCbCrUtils.h"
13
#include "mozilla/layers/ImageBridgeChild.h"
14
#include "mozilla/layers/KnowsCompositor.h"
15
#include "mozilla/layers/SharedRGBImage.h"
16
17
#include <cmath>
#include <stdint.h>
18
19
#ifdef XP_WIN
20
# include "mozilla/WindowsVersion.h"
21
# include "mozilla/layers/D3D11YCbCrImage.h"
22
#endif
23
24
namespace mozilla {
25
26
using namespace mozilla::gfx;
27
using layers::ImageContainer;
28
using layers::PlanarYCbCrData;
29
using layers::PlanarYCbCrImage;
30
using media::TimeUnit;
31
32
const char* AudioData::sTypeName = "audio";
33
const char* VideoData::sTypeName = "video";
34
35
bool IsDataLoudnessHearable(const AudioDataValue aData) {
36
// We can transfer the digital value to dBFS via following formula. According
37
// to American SMPTE standard, 0 dBu equals -20 dBFS. In theory 0 dBu is still
38
// hearable, so we choose a smaller value as our threshold. If the loudness
39
// is under this threshold, it might not be hearable.
40
return 20.0f * std::log10(AudioSampleToFloat(aData)) > -100;
41
}
42
43
// Constructs an audio sample. The duration is derived from the frame count
// (samples per channel) in `aData` and the sample rate.
// NOTE(review): assumes aChannels > 0 — the divisions below would otherwise
// be undefined; confirm callers guarantee this.
AudioData::AudioData(int64_t aOffset, const media::TimeUnit& aTime,
                     AlignedAudioBuffer&& aData, uint32_t aChannels,
                     uint32_t aRate, uint32_t aChannelMap)
    : MediaData(sType, aOffset, aTime,
                FramesToTimeUnit(aData.Length() / aChannels, aRate)),
      mChannels(aChannels),
      mChannelMap(aChannelMap),
      mRate(aRate),
      // Keep the untrimmed timestamp so SetTrimWindow() can compute offsets
      // relative to the original buffer.
      mOriginalTime(aTime),
      mAudioData(std::move(aData)),
      mFrames(mAudioData.Length() / aChannels) {}
54
55
Span<AudioDataValue> AudioData::Data() const {
  // Expose the (possibly trimmed) interleaved samples as a span.
  AudioDataValue* samples = GetAdjustedData();
  return MakeSpan(samples, mFrames * mChannels);
}
58
59
bool AudioData::AdjustForStartTime(const media::TimeUnit& aStartTime) {
  // Rebase the untrimmed timestamp and any trim window so they remain
  // consistent with the adjusted presentation time.
  mOriginalTime -= aStartTime;
  if (mTrimWindow) {
    *mTrimWindow -= aStartTime;
  }
  const bool baseOk = MediaData::AdjustForStartTime(aStartTime);
  return baseOk && mOriginalTime.IsValid();
}
66
67
// Restricts the audible portion of this sample to aTrim (absolute time).
// The underlying buffer is untouched; only mDataOffset, mFrames, mTime and
// mDuration are adjusted. Returns false if the window falls outside the
// original data, on overflow, or if the data was already moved out.
bool AudioData::SetTrimWindow(const media::TimeInterval& aTrim) {
  MOZ_DIAGNOSTIC_ASSERT(aTrim.mStart.IsValid() && aTrim.mEnd.IsValid(),
                        "An overflow occurred on the provided TimeInterval");
  if (!mAudioData) {
    // MoveableData got called. Can no longer work on it.
    return false;
  }
  const size_t originalFrames = mAudioData.Length() / mChannels;
  const TimeUnit originalDuration = FramesToTimeUnit(originalFrames, mRate);
  // The trim window must be fully contained in the original sample.
  if (aTrim.mStart < mOriginalTime ||
      aTrim.mEnd > mOriginalTime + originalDuration) {
    return false;
  }

  auto trimBefore = TimeUnitToFrames(aTrim.mStart - mOriginalTime, mRate);
  // When trimming exactly to the current end time, use the frame count
  // directly instead of re-deriving it from time, avoiding rounding error.
  auto trimAfter = aTrim.mEnd == GetEndTime()
                       ? originalFrames
                       : TimeUnitToFrames(aTrim.mEnd - mOriginalTime, mRate);
  if (!trimBefore.isValid() || !trimAfter.isValid()) {
    // Overflow.
    return false;
  }
  MOZ_DIAGNOSTIC_ASSERT(trimAfter.value() >= trimBefore.value(),
                        "Something went wrong with trimming value");
  if (!mTrimWindow && trimBefore == 0 && trimAfter == originalFrames) {
    // Nothing to change, abort early to prevent rounding errors.
    return true;
  }

  mTrimWindow = Some(aTrim);
  // mDataOffset is expressed in samples (frames * channels).
  mDataOffset = trimBefore.value() * mChannels;
  MOZ_DIAGNOSTIC_ASSERT(mDataOffset <= mAudioData.Length(),
                        "Data offset outside original buffer");
  mFrames = (trimAfter - trimBefore).value();
  MOZ_DIAGNOSTIC_ASSERT(mFrames <= originalFrames,
                        "More frames than found in container");
  mTime = mOriginalTime + FramesToTimeUnit(trimBefore.value(), mRate);
  mDuration = FramesToTimeUnit(mFrames, mRate);

  return true;
}
108
109
AudioDataValue* AudioData::GetAdjustedData() const {
  // Skip past frames trimmed from the front. Returns null once
  // MoveableData() has stolen the buffer.
  return mAudioData ? mAudioData.Data() + mDataOffset : nullptr;
}
115
116
void AudioData::EnsureAudioBuffer() {
117
if (mAudioBuffer || !mAudioData) {
118
return;
119
}
120
const AudioDataValue* srcData = GetAdjustedData();
121
mAudioBuffer =
122
SharedBuffer::Create(mFrames * mChannels * sizeof(AudioDataValue));
123
124
AudioDataValue* destData = static_cast<AudioDataValue*>(mAudioBuffer->Data());
125
for (uint32_t i = 0; i < mFrames; ++i) {
126
for (uint32_t j = 0; j < mChannels; ++j) {
127
destData[j * mFrames + i] = srcData[i * mChannels + j];
128
}
129
}
130
}
131
132
size_t AudioData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
  // The object itself plus the interleaved sample storage...
  size_t total = aMallocSizeOf(this);
  total += mAudioData.SizeOfExcludingThis(aMallocSizeOf);
  // ...plus the planar copy, if EnsureAudioBuffer() has been called.
  if (mAudioBuffer) {
    total += mAudioBuffer->SizeOfIncludingThis(aMallocSizeOf);
  }
  return total;
}
140
141
bool AudioData::IsAudible() const {
142
if (!mAudioData) {
143
return false;
144
}
145
146
const AudioDataValue* data = GetAdjustedData();
147
148
for (uint32_t frame = 0; frame < mFrames; ++frame) {
149
for (uint32_t channel = 0; channel < mChannels; ++channel) {
150
if (IsDataLoudnessHearable(data[frame * mChannels + channel])) {
151
return true;
152
}
153
}
154
}
155
return false;
156
}
157
158
// Extracts the sample data, leaving this AudioData empty. The buffer is
// first shrunk in place to the current trim window so the caller only
// receives the audible portion.
AlignedAudioBuffer AudioData::MoveableData() {
  // Trim buffer according to trimming mask.
  mAudioData.PopFront(mDataOffset);
  mAudioData.SetLength(mFrames * mChannels);
  // Reset trimming state; GetAdjustedData() returns nullptr from now on.
  mDataOffset = 0;
  mFrames = 0;
  mTrimWindow.reset();
  return std::move(mAudioData);
}
167
168
static bool ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane) {
  // Reject dimensions the image layer cannot represent.
  if (aPlane.mWidth > PlanarYCbCrImage::MAX_DIMENSION ||
      aPlane.mHeight > PlanarYCbCrImage::MAX_DIMENSION) {
    return false;
  }
  // Cap the total pixel count and require a stride that can hold a full row.
  return aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
         aPlane.mStride > 0 && aPlane.mWidth <= aPlane.mStride;
}
174
175
// Sanity-checks a decoded YCbCr buffer against the picture rect we intend to
// extract: chroma planes must agree in size, each plane must pass
// ValidatePlane(), and the picture rect must fit inside the Y plane.
static bool ValidateBufferAndPicture(const VideoData::YCbCrBuffer& aBuffer,
                                     const IntRect& aPicture) {
  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return false;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return false;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) ||
      !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return false;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  // Checked arithmetic guards against overflow in x + width / y + height.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight) {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return false;
  }
  return true;
}
210
211
// Constructs a video frame without an image; callers attach mImage
// separately (see CreateAndCopyData / CreateFromImage).
VideoData::VideoData(int64_t aOffset, const TimeUnit& aTime,
                     const TimeUnit& aDuration, bool aKeyframe,
                     const TimeUnit& aTimecode, IntSize aDisplay,
                     layers::ImageContainer::FrameID aFrameID)
    : MediaData(Type::VIDEO_DATA, aOffset, aTime, aDuration),
      mDisplay(aDisplay),
      mFrameID(aFrameID),
      mSentToCompositor(false),
      // Unknown until the reader learns the next keyframe's time.
      mNextKeyFrameTime(TimeUnit::Invalid()) {
  MOZ_ASSERT(!mDuration.IsNegative(), "Frame must have non-negative duration.");
  mKeyframe = aKeyframe;
  mTimecode = aTimecode;
}
224
225
// Defaulted: members clean themselves up (RAII); an empty user-provided body
// adds nothing (modernize-use-equals-default).
VideoData::~VideoData() = default;
226
227
// Registers a one-shot listener that is notified when this frame is handed
// to the compositor. Must be called before MarkSentToCompositor().
void VideoData::SetListener(UniquePtr<Listener> aListener) {
  MOZ_ASSERT(!mSentToCompositor,
             "Listener should be registered before sending data");

  mListener = std::move(aListener);
}
233
234
void VideoData::MarkSentToCompositor() {
235
if (mSentToCompositor) {
236
return;
237
}
238
239
mSentToCompositor = true;
240
if (mListener != nullptr) {
241
mListener->OnSentToCompositor();
242
mListener = nullptr;
243
}
244
}
245
246
size_t VideoData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
  size_t total = aMallocSizeOf(this);

  // Only PLANAR_YCBCR has a well-defined size function, so reporting is
  // limited to that image format; other formats are not accounted for.
  if (mImage && mImage->GetFormat() == ImageFormat::PLANAR_YCBCR) {
    auto* ycbcr =
        static_cast<const mozilla::layers::PlanarYCbCrImage*>(mImage.get());
    total += ycbcr->SizeOfIncludingThis(aMallocSizeOf);
  }

  return total;
}
259
260
// Replaces the frame's duration, e.g. once the next frame's timestamp is
// known. A negative duration is a caller bug.
void VideoData::UpdateDuration(const TimeUnit& aDuration) {
  MOZ_ASSERT(!aDuration.IsNegative());
  mDuration = aDuration;
}
264
265
void VideoData::UpdateTimestamp(const TimeUnit& aTimestamp) {
266
MOZ_ASSERT(!aTimestamp.IsNegative());
267
268
auto updatedDuration = GetEndTime() - aTimestamp;
269
MOZ_ASSERT(!updatedDuration.IsNegative());
270
271
mTime = aTimestamp;
272
mDuration = updatedDuration;
273
}
274
275
// Maps a decoder-side YCbCrBuffer plus picture rect onto the layers
// PlanarYCbCrData descriptor (no pixel data is copied).
PlanarYCbCrData ConstructPlanarYCbCrData(const VideoInfo& aInfo,
                                         const VideoData::YCbCrBuffer& aBuffer,
                                         const IntRect& aPicture) {
  const VideoData::YCbCrBuffer::Plane& Y = aBuffer.mPlanes[0];
  const VideoData::YCbCrBuffer::Plane& Cb = aBuffer.mPlanes[1];
  const VideoData::YCbCrBuffer::Plane& Cr = aBuffer.mPlanes[2];

  PlanarYCbCrData data;
  data.mYChannel = Y.mData + Y.mOffset;
  data.mYSize = IntSize(Y.mWidth, Y.mHeight);
  data.mYStride = Y.mStride;
  data.mYSkip = Y.mSkip;
  data.mCbChannel = Cb.mData + Cb.mOffset;
  data.mCrChannel = Cr.mData + Cr.mOffset;
  // Cb and Cr share one size/stride; ValidateBufferAndPicture checks that
  // the two chroma planes agree.
  data.mCbCrSize = IntSize(Cb.mWidth, Cb.mHeight);
  data.mCbCrStride = Cb.mStride;
  data.mCbSkip = Cb.mSkip;
  data.mCrSkip = Cr.mSkip;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = aPicture.Size();
  data.mStereoMode = aInfo.mStereoMode;
  data.mYUVColorSpace = aBuffer.mYUVColorSpace;
  data.mColorDepth = aBuffer.mColorDepth;
  return data;
}
301
302
/* static */
303
bool VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
304
const VideoInfo& aInfo,
305
const YCbCrBuffer& aBuffer,
306
const IntRect& aPicture, bool aCopyData) {
307
if (!aVideoImage) {
308
return false;
309
}
310
311
PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
312
313
aVideoImage->SetDelayedConversion(true);
314
if (aCopyData) {
315
return aVideoImage->CopyData(data);
316
} else {
317
return aVideoImage->AdoptData(data);
318
}
319
}
320
321
/* static */
322
// Creates a VideoData whose image is a deep copy of aBuffer. Returns a dummy
// (image-less) VideoData when no ImageContainer is supplied, and nullptr
// when validation or image allocation fails.
already_AddRefed<VideoData> VideoData::CreateAndCopyData(
    const VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset,
    const TimeUnit& aTime, const TimeUnit& aDuration,
    const YCbCrBuffer& aBuffer, bool aKeyframe, const TimeUnit& aTimecode,
    const IntRect& aPicture, layers::KnowsCompositor* aAllocator) {
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset, aTime, aDuration, aKeyframe,
                                      aTimecode, aInfo.mDisplay, 0));
    return v.forget();
  }

  if (!ValidateBufferAndPicture(aBuffer, aPicture)) {
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset, aTime, aDuration, aKeyframe,
                                    aTimecode, aInfo.mDisplay, 0));

  // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
  // format.
  // Use #ifdef to match the guard at the top of this file: `#if XP_WIN`
  // silently evaluates an undefined macro to 0 and errors out if the macro
  // ever expands to nothing.
#ifdef XP_WIN
  // We disable this code path on Windows versions earlier than Windows 8 due
  // to intermittent crashes with old drivers. See bug 1405110.
  // D3D11YCbCrImage can only handle YCbCr images using 3 non-interleaved
  // planes; a non-zero mSkip value indicates that one of the planes would be
  // interleaved.
  if (IsWin8OrLater() && !XRE_IsParentProcess() && aAllocator &&
      aAllocator->SupportsD3D11() && aBuffer.mPlanes[0].mSkip == 0 &&
      aBuffer.mPlanes[1].mSkip == 0 && aBuffer.mPlanes[2].mSkip == 0) {
    RefPtr<layers::D3D11YCbCrImage> d3d11Image = new layers::D3D11YCbCrImage();
    PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
    if (d3d11Image->SetData(layers::ImageBridgeChild::GetSingleton()
                                ? layers::ImageBridgeChild::GetSingleton().get()
                                : aAllocator,
                            aContainer, data)) {
      v->mImage = d3d11Image;
      return v.forget();
    }
    // On failure, fall through to the software PlanarYCbCrImage path below.
  }
#endif
  if (!v->mImage) {
    v->mImage = aContainer->CreatePlanarYCbCrImage();
  }

  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = v->mImage->AsPlanarYCbCrImage();
  MOZ_ASSERT(videoImage);

  // Copy (rather than adopt) so the caller remains free to release aBuffer.
  if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                      true /* aCopyData */)) {
    return nullptr;
  }

  return v.forget();
}
382
383
/* static */
384
// Alpha-aware overload: converts a YUVA buffer to a BGRA SharedRGBImage in
// software. Returns a dummy VideoData when no container is supplied, and
// nullptr on validation/allocation/lock failure.
already_AddRefed<VideoData> VideoData::CreateAndCopyData(
    const VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset,
    const TimeUnit& aTime, const TimeUnit& aDuration,
    const YCbCrBuffer& aBuffer, const YCbCrBuffer::Plane& aAlphaPlane,
    bool aKeyframe, const TimeUnit& aTimecode, const IntRect& aPicture) {
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset, aTime, aDuration, aKeyframe,
                                      aTimecode, aInfo.mDisplay, 0));
    return v.forget();
  }

  if (!ValidateBufferAndPicture(aBuffer, aPicture)) {
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset, aTime, aDuration, aKeyframe,
                                    aTimecode, aInfo.mDisplay, 0));

  // Convert from YUVA to BGRA format on the software side.
  RefPtr<layers::SharedRGBImage> videoImage =
      aContainer->CreateSharedRGBImage();
  v->mImage = videoImage;

  if (!v->mImage) {
    return nullptr;
  }
  // Allocate the destination at the full Y-plane size (B8G8R8A8).
  if (!videoImage->Allocate(
          IntSize(aBuffer.mPlanes[0].mWidth, aBuffer.mPlanes[0].mHeight),
          SurfaceFormat::B8G8R8A8)) {
    return nullptr;
  }

  RefPtr<layers::TextureClient> texture =
      videoImage->GetTextureClient(/* aKnowsCompositor */ nullptr);
  if (!texture) {
    NS_WARNING("Failed to allocate TextureClient");
    return nullptr;
  }

  // Lock for CPU write access; the lock is released when autoLock goes out
  // of scope.
  layers::TextureClientAutoLock autoLock(texture,
                                         layers::OpenMode::OPEN_WRITE_ONLY);
  if (!autoLock.Succeeded()) {
    NS_WARNING("Failed to lock TextureClient");
    return nullptr;
  }

  layers::MappedTextureData buffer;
  if (!texture->BorrowMappedData(buffer)) {
    NS_WARNING("Failed to borrow mapped data");
    return nullptr;
  }

  // The naming convention for libyuv and associated utils is word-order.
  // The naming convention in the gfx stack is byte-order.
  // NOTE(review): only the Y and Cb strides are passed here; presumably the
  // Cr plane shares Cb's stride and the alpha plane shares Y's — confirm
  // against ConvertYCbCrAToARGB's contract.
  ConvertYCbCrAToARGB(aBuffer.mPlanes[0].mData, aBuffer.mPlanes[1].mData,
                      aBuffer.mPlanes[2].mData, aAlphaPlane.mData,
                      aBuffer.mPlanes[0].mStride, aBuffer.mPlanes[1].mStride,
                      buffer.data, buffer.stride, buffer.size.width,
                      buffer.size.height);

  return v.forget();
}
448
449
/* static */
450
already_AddRefed<VideoData> VideoData::CreateFromImage(
    const IntSize& aDisplay, int64_t aOffset, const TimeUnit& aTime,
    const TimeUnit& aDuration, const RefPtr<Image>& aImage, bool aKeyframe,
    const TimeUnit& aTimecode) {
  // Wrap an already-existing image in a VideoData; no pixel data is copied.
  RefPtr<VideoData> video(new VideoData(aOffset, aTime, aDuration, aKeyframe,
                                        aTimecode, aDisplay, 0));
  video->mImage = aImage;
  return video.forget();
}
459
460
// Empty sample; mCrypto is a const view onto the writable mCryptoInternal.
MediaRawData::MediaRawData()
    : MediaData(Type::RAW_DATA), mCrypto(mCryptoInternal) {}
462
463
// Sample initialized with a copy of aData (aSize bytes).
MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize)
    : MediaData(Type::RAW_DATA),
      mCrypto(mCryptoInternal),
      mBuffer(aData, aSize) {}
467
468
// Sample with both a main buffer and a separate alpha-channel buffer; both
// inputs are copied.
MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize,
                           const uint8_t* aAlphaData, size_t aAlphaSize)
    : MediaData(Type::RAW_DATA),
      mCrypto(mCryptoInternal),
      mBuffer(aData, aSize),
      mAlphaBuffer(aAlphaData, aAlphaSize) {}
474
475
// Deep-copies this sample: timing, crypto, track metadata, and both data
// buffers. Returns nullptr if either buffer copy fails to allocate.
already_AddRefed<MediaRawData> MediaRawData::Clone() const {
  RefPtr<MediaRawData> s = new MediaRawData;
  s->mTimecode = mTimecode;
  s->mTime = mTime;
  s->mDuration = mDuration;
  s->mOffset = mOffset;
  s->mKeyframe = mKeyframe;
  s->mExtraData = mExtraData;
  s->mCryptoInternal = mCryptoInternal;
  s->mTrackInfo = mTrackInfo;
  s->mEOS = mEOS;
  s->mOriginalPresentationWindow = mOriginalPresentationWindow;
  // Buffer copies can fail on OOM; propagate failure as nullptr.
  if (!s->mBuffer.Append(mBuffer.Data(), mBuffer.Length())) {
    return nullptr;
  }
  if (!s->mAlphaBuffer.Append(mAlphaBuffer.Data(), mAlphaBuffer.Length())) {
    return nullptr;
  }
  return s.forget();
}
495
496
// Defaulted: members clean themselves up (RAII); an empty user-provided body
// adds nothing (modernize-use-equals-default).
MediaRawData::~MediaRawData() = default;
497
498
// Reports the heap size of this sample, including both the main and the
// alpha buffers.
size_t MediaRawData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
  size_t size = aMallocSizeOf(this);
  size += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
  // Also count the alpha buffer; omitting it under-reports memory for video
  // samples that carry an alpha channel.
  size += mAlphaBuffer.SizeOfExcludingThis(aMallocSizeOf);
  return size;
}
503
504
UniquePtr<MediaRawDataWriter> MediaRawData::CreateWriter() {
  // Hand back a writer that mutates this sample's buffer and crypto data.
  return UniquePtr<MediaRawDataWriter>(new MediaRawDataWriter(this));
}
508
509
// The writer mutates aMediaRawData's buffer directly and exposes its
// writable crypto data; aMediaRawData must outlive this writer.
MediaRawDataWriter::MediaRawDataWriter(MediaRawData* aMediaRawData)
    : mCrypto(aMediaRawData->mCryptoInternal), mTarget(aMediaRawData) {}
511
512
// Resizes the target's buffer; returns false on allocation failure.
bool MediaRawDataWriter::SetSize(size_t aSize) {
  return mTarget->mBuffer.SetLength(aSize);
}
515
516
// Inserts aSize bytes at the front of the target's buffer; false on OOM.
bool MediaRawDataWriter::Prepend(const uint8_t* aData, size_t aSize) {
  return mTarget->mBuffer.Prepend(aData, aSize);
}
519
520
// Appends aSize bytes to the target's buffer; false on OOM.
bool MediaRawDataWriter::Append(const uint8_t* aData, size_t aSize) {
  return mTarget->mBuffer.Append(aData, aSize);
}
523
524
// Replaces the buffer's whole contents with aData; false on OOM.
bool MediaRawDataWriter::Replace(const uint8_t* aData, size_t aSize) {
  return mTarget->mBuffer.Replace(aData, aSize);
}
527
528
// Empties the target's buffer.
void MediaRawDataWriter::Clear() { mTarget->mBuffer.Clear(); }
529
530
// Writable pointer into the target's buffer.
uint8_t* MediaRawDataWriter::Data() { return mTarget->mBuffer.Data(); }
531
532
// Current size of the target sample, as reported by MediaRawData::Size().
size_t MediaRawDataWriter::Size() { return mTarget->Size(); }
533
534
// Removes aSize bytes from the front of the target's buffer.
void MediaRawDataWriter::PopFront(size_t aSize) {
  mTarget->mBuffer.PopFront(aSize);
}
537
538
} // namespace mozilla