/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/dom/VideoFrame.h"
#include <math.h>
#include <limits>
#include <utility>
#include "ImageContainer.h"
#include "ImageConversion.h"
#include "MediaResult.h"
#include "VideoColorSpace.h"
#include "WebCodecsUtils.h"
#include "js/StructuredClone.h"
#include "mozilla/Maybe.h"
#include "mozilla/ResultVariant.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Try.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/dom/CanvasUtils.h"
#include "mozilla/dom/DOMRect.h"
#include "mozilla/dom/HTMLCanvasElement.h"
#include "mozilla/dom/HTMLImageElement.h"
#include "mozilla/dom/HTMLVideoElement.h"
#include "mozilla/dom/ImageBitmap.h"
#include "mozilla/dom/ImageUtils.h"
#include "mozilla/dom/OffscreenCanvas.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/SVGImageElement.h"
#include "mozilla/dom/StructuredCloneHolder.h"
#include "mozilla/dom/StructuredCloneTags.h"
#include "mozilla/dom/UnionTypes.h"
#include "mozilla/dom/VideoFrameBinding.h"
#include "mozilla/gfx/2D.h"
#include "mozilla/gfx/Swizzle.h"
#include "mozilla/layers/LayersSurfaces.h"
#include "nsIPrincipal.h"
#include "nsIURI.h"
#include "nsLayoutUtils.h"
extern mozilla::LazyLogModule gWebCodecsLog;
namespace mozilla::dom {
#ifdef LOG_INTERNAL
# undef LOG_INTERNAL
#endif // LOG_INTERNAL
#define LOG_INTERNAL(level, msg, ...) \
MOZ_LOG(gWebCodecsLog, LogLevel::level, (msg, ##__VA_ARGS__))
#ifdef LOG
# undef LOG
#endif // LOG
#define LOG(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__)
#ifdef LOGW
# undef LOGW
#endif // LOGW
#define LOGW(msg, ...) LOG_INTERNAL(Warning, msg, ##__VA_ARGS__)
#ifdef LOGE
# undef LOGE
#endif // LOGE
#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__)
NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(VideoFrame)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(VideoFrame)
tmp->CloseIfNeeded();
NS_IMPL_CYCLE_COLLECTION_UNLINK(mParent)
NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(VideoFrame)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTING_ADDREF(VideoFrame)
// VideoFrame should be released as soon as its refcount drops to zero,
// without waiting for async deletion by the cycle collector, since it may hold
// a large-size image.
NS_IMPL_CYCLE_COLLECTING_RELEASE_WITH_LAST_RELEASE(VideoFrame, CloseIfNeeded())
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(VideoFrame)
NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END
/*
 * The following are helpers to read image data in a given format from a
 * given buffer. The data layout is illustrated in the comments for
* `VideoFrame::Format` below.
*/
static int32_t CeilingOfHalf(int32_t aValue) {
MOZ_ASSERT(aValue >= 0);
return aValue / 2 + (aValue % 2);
}
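// For illustration: CeilingOfHalf rounds up, which matches the chroma-plane
// dimensions of the 4:2:0 formats handled below, e.g.
//   CeilingOfHalf(4) == 2 and CeilingOfHalf(5) == 3,
// so a 5x3 I420 frame carries 3x2 chroma samples per plane.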
class YUVBufferReaderBase {
public:
YUVBufferReaderBase(const Span<uint8_t>& aBuffer, int32_t aWidth,
int32_t aHeight)
: mWidth(aWidth), mHeight(aHeight), mStrideY(aWidth), mBuffer(aBuffer) {}
virtual ~YUVBufferReaderBase() = default;
const uint8_t* DataY() const { return mBuffer.data(); }
const int32_t mWidth;
const int32_t mHeight;
const int32_t mStrideY;
protected:
CheckedInt<size_t> YByteSize() const {
return CheckedInt<size_t>(mStrideY) * mHeight;
}
const Span<uint8_t> mBuffer;
};
class I420ABufferReader;
class I420BufferReader : public YUVBufferReaderBase {
public:
I420BufferReader(const Span<uint8_t>& aBuffer, int32_t aWidth,
int32_t aHeight)
: YUVBufferReaderBase(aBuffer, aWidth, aHeight),
mStrideU(CeilingOfHalf(aWidth)),
mStrideV(CeilingOfHalf(aWidth)) {}
virtual ~I420BufferReader() = default;
const uint8_t* DataU() const { return &mBuffer[YByteSize().value()]; }
const uint8_t* DataV() const {
return &mBuffer[YByteSize().value() + UByteSize().value()];
}
virtual I420ABufferReader* AsI420ABufferReader() { return nullptr; }
const int32_t mStrideU;
const int32_t mStrideV;
protected:
CheckedInt<size_t> UByteSize() const {
return CheckedInt<size_t>(CeilingOfHalf(mHeight)) * mStrideU;
}
  CheckedInt<size_t> VByteSize() const {
return CheckedInt<size_t>(CeilingOfHalf(mHeight)) * mStrideV;
}
};
class I420ABufferReader final : public I420BufferReader {
public:
I420ABufferReader(const Span<uint8_t>& aBuffer, int32_t aWidth,
int32_t aHeight)
: I420BufferReader(aBuffer, aWidth, aHeight), mStrideA(aWidth) {
MOZ_ASSERT(mStrideA == mStrideY);
}
virtual ~I420ABufferReader() = default;
const uint8_t* DataA() const {
return &mBuffer[YByteSize().value() + UByteSize().value() +
                    VByteSize().value()];
}
virtual I420ABufferReader* AsI420ABufferReader() override { return this; }
const int32_t mStrideA;
};
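// A layout sketch for the readers above, assuming a 4x4 frame:
//
//   // I420: 16 Y bytes, then 4 U bytes, then 4 V bytes = 24 bytes total.
//   uint8_t buf[24] = {};
//   I420BufferReader reader(Span<uint8_t>(buf, sizeof(buf)), 4, 4);
//   MOZ_ASSERT(reader.DataU() == reader.DataY() + 16);
//   MOZ_ASSERT(reader.DataV() == reader.DataU() + 4);
//
// I420A appends a full-resolution alpha plane (another 16 bytes here) after
// the V plane, which is what DataA() above points to.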
class NV12BufferReader final : public YUVBufferReaderBase {
public:
NV12BufferReader(const Span<uint8_t>& aBuffer, int32_t aWidth,
int32_t aHeight)
: YUVBufferReaderBase(aBuffer, aWidth, aHeight),
mStrideUV(aWidth + aWidth % 2) {}
virtual ~NV12BufferReader() = default;
const uint8_t* DataUV() const { return &mBuffer[YByteSize().value()]; }
const int32_t mStrideUV;
};
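// NV12 stores a single interleaved CbCr plane after Y. For the same 4x4
// frame as in the sketch above, the layout would be:
//
//   uint8_t buf[24] = {};
//   NV12BufferReader reader(Span<uint8_t>(buf, sizeof(buf)), 4, 4);
//   MOZ_ASSERT(reader.DataUV() == reader.DataY() + 16);  // Cb Cr Cb Cr ...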
/*
 * The following are security-check and argument-validation helpers used by
 * the VideoFrame constructors below.
*/
static bool IsSameOrigin(nsIGlobalObject* aGlobal, const VideoFrame& aFrame) {
MOZ_ASSERT(aGlobal);
MOZ_ASSERT(aFrame.GetParentObject());
nsIPrincipal* principalX = aGlobal->PrincipalOrNull();
nsIPrincipal* principalY = aFrame.GetParentObject()->PrincipalOrNull();
  // If both VideoFrames were created in workers, they are in the same origin.
if (!principalX) {
return !principalY;
}
  // Otherwise, compare their principals.
return principalX->Equals(principalY);
}
static bool IsSameOrigin(nsIGlobalObject* aGlobal,
HTMLVideoElement& aVideoElement) {
MOZ_ASSERT(aGlobal);
  // If CORS is in use, consider the video source same-origin.
if (aVideoElement.GetCORSMode() != CORS_NONE) {
return true;
}
  // Otherwise, check whether the video source has had a cross-origin redirect.
if (aVideoElement.HadCrossOriginRedirects()) {
return false;
}
  // Finally, compare the global's principal with the video's.
nsIPrincipal* principal = aGlobal->PrincipalOrNull();
nsCOMPtr<nsIPrincipal> elementPrincipal =
aVideoElement.GetCurrentVideoPrincipal();
  // <video> cannot be created in a worker, so it should have a valid principal.
if (NS_WARN_IF(!elementPrincipal) || !principal) {
return false;
}
return principal->Subsumes(elementPrincipal);
}
// A sub-helper to convert DOMRectInit to gfx::IntRect.
static Result<gfx::IntRect, nsCString> ToIntRect(const DOMRectInit& aRectInit) {
auto EQ = [](const double& a, const double& b) {
constexpr double e = std::numeric_limits<double>::epsilon();
return std::fabs(a - b) <= e;
};
auto GT = [&](const double& a, const double& b) {
return !EQ(a, b) && a > b;
};
// Make sure the double values are in the gfx::IntRect's valid range, before
// checking the spec's valid range. The double's infinity value is larger than
// gfx::IntRect's max value so it will be filtered out here.
constexpr double MAX = static_cast<double>(
std::numeric_limits<decltype(gfx::IntRect::x)>::max());
constexpr double MIN = static_cast<double>(
std::numeric_limits<decltype(gfx::IntRect::x)>::min());
if (GT(aRectInit.mX, MAX) || GT(MIN, aRectInit.mX)) {
return Err("x is out of the valid range"_ns);
}
if (GT(aRectInit.mY, MAX) || GT(MIN, aRectInit.mY)) {
return Err("y is out of the valid range"_ns);
}
if (GT(aRectInit.mWidth, MAX) || GT(MIN, aRectInit.mWidth)) {
return Err("width is out of the valid range"_ns);
}
if (GT(aRectInit.mHeight, MAX) || GT(MIN, aRectInit.mHeight)) {
return Err("height is out of the valid range"_ns);
}
gfx::IntRect rect(
static_cast<decltype(gfx::IntRect::x)>(aRectInit.mX),
static_cast<decltype(gfx::IntRect::y)>(aRectInit.mY),
static_cast<decltype(gfx::IntRect::width)>(aRectInit.mWidth),
static_cast<decltype(gfx::IntRect::height)>(aRectInit.mHeight));
// Check the spec's valid range.
if (rect.X() < 0) {
return Err("x must be non-negative"_ns);
}
if (rect.Y() < 0) {
return Err("y must be non-negative"_ns);
}
if (rect.Width() <= 0) {
return Err("width must be positive"_ns);
}
if (rect.Height() <= 0) {
return Err("height must be positive"_ns);
}
return rect;
}
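// For example, a DOMRectInit of { x: 0, y: 0, width: 640, height: 480 } maps
// to gfx::IntRect(0, 0, 640, 480), while a negative x, a zero width, or an
// infinite value is rejected with an error string before any cast happens.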
// A sub-helper to convert a (width, height) pair to gfx::IntSize.
static Result<gfx::IntSize, nsCString> ToIntSize(const uint32_t& aWidth,
const uint32_t& aHeight) {
// Make sure the given values are in the gfx::IntSize's valid range, before
// checking the spec's valid range.
constexpr uint32_t MAX = static_cast<uint32_t>(
std::numeric_limits<decltype(gfx::IntRect::width)>::max());
if (aWidth > MAX) {
return Err("Width exceeds the implementation's range"_ns);
}
if (aHeight > MAX) {
return Err("Height exceeds the implementation's range"_ns);
}
gfx::IntSize size(static_cast<decltype(gfx::IntRect::width)>(aWidth),
static_cast<decltype(gfx::IntRect::height)>(aHeight));
// Check the spec's valid range.
if (size.Width() <= 0) {
return Err("Width must be positive"_ns);
}
if (size.Height() <= 0) {
return Err("Height must be positive"_ns);
}
return size;
}
// A sub-helper to make sure the visible range is within the picture.
static Result<Ok, nsCString> ValidateVisibility(
const gfx::IntRect& aVisibleRect, const gfx::IntSize& aPicSize) {
MOZ_ASSERT(aVisibleRect.X() >= 0);
MOZ_ASSERT(aVisibleRect.Y() >= 0);
MOZ_ASSERT(aVisibleRect.Width() > 0);
MOZ_ASSERT(aVisibleRect.Height() > 0);
const auto w = CheckedInt<uint32_t>(aVisibleRect.Width()) + aVisibleRect.X();
if (w.value() > static_cast<uint32_t>(aPicSize.Width())) {
return Err(
"Sum of visible rectangle's x and width exceeds the picture's width"_ns);
}
const auto h = CheckedInt<uint32_t>(aVisibleRect.Height()) + aVisibleRect.Y();
if (h.value() > static_cast<uint32_t>(aPicSize.Height())) {
return Err(
"Sum of visible rectangle's y and height exceeds the picture's height"_ns);
}
return Ok();
}
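// For example, in an 8x8 picture a visible rect of (2, 2, 4, 4) is accepted
// since 2 + 4 <= 8 on both axes, while (2, 2, 8, 8) is rejected because
// 2 + 8 exceeds the picture's width and height.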
// A sub-helper to check and get display{Width, Height} in
// VideoFrame(Buffer)Init.
template <class T>
static Result<Maybe<gfx::IntSize>, nsCString> MaybeGetDisplaySize(
const T& aInit) {
if (aInit.mDisplayWidth.WasPassed() != aInit.mDisplayHeight.WasPassed()) {
return Err(
"displayWidth and displayHeight cannot be set without the other"_ns);
}
Maybe<gfx::IntSize> displaySize;
if (aInit.mDisplayWidth.WasPassed() && aInit.mDisplayHeight.WasPassed()) {
displaySize.emplace();
MOZ_TRY_VAR(displaySize.ref(), ToIntSize(aInit.mDisplayWidth.Value(),
aInit.mDisplayHeight.Value())
.mapErr([](nsCString error) {
error.Insert("display", 0);
return error;
}));
}
return displaySize;
}
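// For example, passing only displayWidth is rejected, while displayWidth =
// 1280 with displayHeight = 720 yields Some(gfx::IntSize(1280, 720)).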
static Result<
std::tuple<gfx::IntSize, Maybe<gfx::IntRect>, Maybe<gfx::IntSize>>,
nsCString>
ValidateVideoFrameBufferInit(const VideoFrameBufferInit& aInit) {
gfx::IntSize codedSize;
MOZ_TRY_VAR(codedSize, ToIntSize(aInit.mCodedWidth, aInit.mCodedHeight)
.mapErr([](nsCString error) {
error.Insert("coded", 0);
return error;
}));
Maybe<gfx::IntRect> visibleRect;
if (aInit.mVisibleRect.WasPassed()) {
visibleRect.emplace();
MOZ_TRY_VAR(
visibleRect.ref(),
ToIntRect(aInit.mVisibleRect.Value()).mapErr([](nsCString error) {
error.Insert("visibleRect's ", 0);
return error;
}));
MOZ_TRY(ValidateVisibility(visibleRect.ref(), codedSize));
}
Maybe<gfx::IntSize> displaySize;
MOZ_TRY_VAR(displaySize, MaybeGetDisplaySize(aInit));
return std::make_tuple(codedSize, visibleRect, displaySize);
}
static Result<Ok, nsCString> VerifyRectOffsetAlignment(
const Maybe<VideoFrame::Format>& aFormat, const gfx::IntRect& aRect) {
if (!aFormat) {
return Ok();
}
for (const VideoFrame::Format::Plane& p : aFormat->Planes()) {
const gfx::IntSize sample = aFormat->SampleSize(p);
if (aRect.X() % sample.Width() != 0) {
return Err("Mismatch between format and given left offset"_ns);
}
if (aRect.Y() % sample.Height() != 0) {
return Err("Mismatch between format and given top offset"_ns);
}
}
return Ok();
}
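// For example, I420 subsamples chroma by 2 in each dimension, so a rect
// whose x or y is odd is rejected: an offset of 1 luma pixel cannot be
// expressed in whole 2x2 chroma samples.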
static Result<gfx::IntRect, nsCString> ParseVisibleRect(
const gfx::IntRect& aDefaultRect, const Maybe<gfx::IntRect>& aOverrideRect,
const gfx::IntSize& aCodedSize, const VideoFrame::Format& aFormat) {
MOZ_ASSERT(ValidateVisibility(aDefaultRect, aCodedSize).isOk());
gfx::IntRect rect = aDefaultRect;
if (aOverrideRect) {
// Skip checking overrideRect's width and height here. They should be
// checked before reaching here, and ValidateVisibility will assert it.
MOZ_TRY(ValidateVisibility(aOverrideRect.ref(), aCodedSize));
rect = *aOverrideRect;
}
MOZ_TRY(VerifyRectOffsetAlignment(Some(aFormat), rect));
return rect;
}
struct ComputedPlaneLayout {
// The offset from the beginning of the buffer in one plane.
uint32_t mDestinationOffset = 0;
// The stride of the image data in one plane.
uint32_t mDestinationStride = 0;
// Sample count of picture's top offset (a.k.a samples of y).
uint32_t mSourceTop = 0;
// Sample count of the picture's height.
uint32_t mSourceHeight = 0;
// Byte count of the picture's left offset (a.k.a bytes of x).
uint32_t mSourceLeftBytes = 0;
// Byte count of the picture's width.
uint32_t mSourceWidthBytes = 0;
};
struct CombinedBufferLayout {
CombinedBufferLayout() : mAllocationSize(0) {}
CombinedBufferLayout(uint32_t aAllocationSize,
nsTArray<ComputedPlaneLayout>&& aLayout)
: mAllocationSize(aAllocationSize),
mComputedLayouts(std::move(aLayout)) {}
uint32_t mAllocationSize = 0;
nsTArray<ComputedPlaneLayout> mComputedLayouts;
};
static Result<CombinedBufferLayout, nsCString> ComputeLayoutAndAllocationSize(
const gfx::IntRect& aRect, const VideoFrame::Format& aFormat,
const Sequence<PlaneLayout>* aPlaneLayouts) {
nsTArray<VideoFrame::Format::Plane> planes = aFormat.Planes();
if (aPlaneLayouts && aPlaneLayouts->Length() != planes.Length()) {
return Err("Mismatch between format and layout"_ns);
}
uint32_t minAllocationSize = 0;
nsTArray<ComputedPlaneLayout> layouts;
nsTArray<uint32_t> endOffsets;
for (size_t i = 0; i < planes.Length(); ++i) {
const VideoFrame::Format::Plane& p = planes[i];
const gfx::IntSize sampleSize = aFormat.SampleSize(p);
MOZ_RELEASE_ASSERT(!sampleSize.IsEmpty());
// aRect's x, y, width, and height are int32_t, and sampleSize's width and
// height >= 1, so (aRect.* / sampleSize.*) must be in int32_t range.
CheckedUint32 sourceTop(aRect.Y());
sourceTop /= sampleSize.Height();
MOZ_RELEASE_ASSERT(sourceTop.isValid());
CheckedUint32 sourceHeight(aRect.Height());
sourceHeight /= sampleSize.Height();
MOZ_RELEASE_ASSERT(sourceHeight.isValid());
CheckedUint32 sourceLeftBytes(aRect.X());
sourceLeftBytes /= sampleSize.Width();
MOZ_RELEASE_ASSERT(sourceLeftBytes.isValid());
sourceLeftBytes *= aFormat.SampleBytes(p);
if (!sourceLeftBytes.isValid()) {
return Err(nsPrintfCString(
"The parsed-rect's x-offset is too large for %s plane",
aFormat.PlaneName(p)));
}
CheckedUint32 sourceWidthBytes(aRect.Width());
sourceWidthBytes /= sampleSize.Width();
MOZ_RELEASE_ASSERT(sourceWidthBytes.isValid());
sourceWidthBytes *= aFormat.SampleBytes(p);
if (!sourceWidthBytes.isValid()) {
return Err(
nsPrintfCString("The parsed-rect's width is too large for %s plane",
aFormat.PlaneName(p)));
}
    // TODO: The spec here is wrong, so we do this differently:
// This comment should be removed once the issue is resolved.
ComputedPlaneLayout layout{.mDestinationOffset = 0,
.mDestinationStride = 0,
.mSourceTop = sourceTop.value(),
.mSourceHeight = sourceHeight.value(),
.mSourceLeftBytes = sourceLeftBytes.value(),
.mSourceWidthBytes = sourceWidthBytes.value()};
if (aPlaneLayouts) {
const PlaneLayout& planeLayout = aPlaneLayouts->ElementAt(i);
if (planeLayout.mStride < layout.mSourceWidthBytes) {
return Err(nsPrintfCString("The stride in %s plane is too small",
aFormat.PlaneName(p)));
}
layout.mDestinationOffset = planeLayout.mOffset;
layout.mDestinationStride = planeLayout.mStride;
} else {
layout.mDestinationOffset = minAllocationSize;
layout.mDestinationStride = layout.mSourceWidthBytes;
}
const CheckedInt<uint32_t> planeSize =
CheckedInt<uint32_t>(layout.mDestinationStride) * layout.mSourceHeight;
if (!planeSize.isValid()) {
return Err("Invalid layout with an over-sized plane"_ns);
}
const CheckedInt<uint32_t> planeEnd = planeSize + layout.mDestinationOffset;
if (!planeEnd.isValid()) {
return Err("Invalid layout with the out-out-bound offset"_ns);
}
endOffsets.AppendElement(planeEnd.value());
minAllocationSize = std::max(minAllocationSize, planeEnd.value());
for (size_t j = 0; j < i; ++j) {
const ComputedPlaneLayout& earlier = layouts[j];
      // If the current plane's end is at or before the previous one's start,
      // or the previous plane's end is at or before the current one's start,
      // then they do not overlap. Otherwise, they do.
if (endOffsets[i] > earlier.mDestinationOffset &&
endOffsets[j] > layout.mDestinationOffset) {
return Err("Invalid layout with the overlapped planes"_ns);
}
}
layouts.AppendElement(layout);
}
return CombinedBufferLayout(minAllocationSize, std::move(layouts));
}
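// A worked example, assuming I420 (1 byte per sample), a (0, 0, 4, 4) rect,
// and no caller-provided layout:
//   Y plane: stride 4, 4 rows -> offset 0,  end 16
//   U plane: stride 2, 2 rows -> offset 16, end 20
//   V plane: stride 2, 2 rows -> offset 20, end 24
// so the returned mAllocationSize is 24 and the planes are tightly packed.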
static Result<Ok, nsCString> VerifyRectSizeAlignment(
const VideoFrame::Format& aFormat, const gfx::IntRect& aRect) {
for (const VideoFrame::Format::Plane& p : aFormat.Planes()) {
const gfx::IntSize sample = aFormat.SampleSize(p);
if (aRect.Width() % sample.Width() != 0) {
return Err("Mismatch between format and given rect's width"_ns);
}
if (aRect.Height() % sample.Height() != 0) {
return Err("Mismatch between format and given rect's height"_ns);
}
}
return Ok();
}
static Result<CombinedBufferLayout, nsCString> ParseVideoFrameCopyToOptions(
const VideoFrameCopyToOptions& aOptions, const gfx::IntRect& aVisibleRect,
const gfx::IntSize& aCodedSize, const VideoFrame::Format& aFormat) {
Maybe<gfx::IntRect> overrideRect;
if (aOptions.mRect.WasPassed()) {
    // TODO: We handle some edge cases that the spec misses:
// This comment should be removed once the issue is resolved.
overrideRect.emplace();
MOZ_TRY_VAR(overrideRect.ref(),
ToIntRect(aOptions.mRect.Value()).mapErr([](nsCString error) {
error.Insert("rect's ", 0);
return error;
}));
MOZ_TRY(VerifyRectSizeAlignment(aFormat, overrideRect.ref()));
}
gfx::IntRect parsedRect;
MOZ_TRY_VAR(parsedRect, ParseVisibleRect(aVisibleRect, overrideRect,
aCodedSize, aFormat));
const Sequence<PlaneLayout>* optLayout = OptionalToPointer(aOptions.mLayout);
return ComputeLayoutAndAllocationSize(parsedRect, aFormat, optLayout);
}
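// E.g. calling copyTo with options { rect: { x: 0, y: 0, width: 2, height: 2 } }
// on an I420 frame validates the rect's 2x2 alignment and then computes a
// 6-byte minimum allocation (4 Y + 1 U + 1 V bytes).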
static bool IsYUVFormat(const VideoPixelFormat& aFormat) {
switch (aFormat) {
case VideoPixelFormat::I420:
case VideoPixelFormat::I420P10:
case VideoPixelFormat::I420P12:
case VideoPixelFormat::I420A:
case VideoPixelFormat::I420AP10:
case VideoPixelFormat::I420AP12:
case VideoPixelFormat::I422:
case VideoPixelFormat::I422P10:
case VideoPixelFormat::I422P12:
case VideoPixelFormat::I422A:
case VideoPixelFormat::I422AP10:
case VideoPixelFormat::I422AP12:
case VideoPixelFormat::I444:
case VideoPixelFormat::I444P10:
case VideoPixelFormat::I444P12:
case VideoPixelFormat::I444A:
case VideoPixelFormat::I444AP10:
case VideoPixelFormat::I444AP12:
case VideoPixelFormat::NV12:
return true;
case VideoPixelFormat::RGBA:
case VideoPixelFormat::RGBX:
case VideoPixelFormat::BGRA:
case VideoPixelFormat::BGRX:
return false;
}
return false;
}
static VideoColorSpaceInit PickColorSpace(
const VideoColorSpaceInit* aInitColorSpace,
const VideoPixelFormat& aFormat) {
VideoColorSpaceInit colorSpace;
if (aInitColorSpace) {
colorSpace = *aInitColorSpace;
// By spec, we MAY replace null members of aInitColorSpace with guessed
// values so we can always use these in CreateYUVImageFromBuffer.
if (IsYUVFormat(aFormat) && colorSpace.mMatrix.IsNull()) {
colorSpace.mMatrix.SetValue(VideoMatrixCoefficients::Bt709);
}
return colorSpace;
}
switch (aFormat) {
case VideoPixelFormat::I420:
case VideoPixelFormat::I420P10:
case VideoPixelFormat::I420P12:
case VideoPixelFormat::I420A:
case VideoPixelFormat::I420AP10:
case VideoPixelFormat::I420AP12:
case VideoPixelFormat::I422:
case VideoPixelFormat::I422P10:
case VideoPixelFormat::I422P12:
case VideoPixelFormat::I422A:
case VideoPixelFormat::I422AP10:
case VideoPixelFormat::I422AP12:
case VideoPixelFormat::I444:
case VideoPixelFormat::I444P10:
case VideoPixelFormat::I444P12:
case VideoPixelFormat::I444A:
case VideoPixelFormat::I444AP10:
case VideoPixelFormat::I444AP12:
case VideoPixelFormat::NV12:
colorSpace.mFullRange.SetValue(false);
colorSpace.mMatrix.SetValue(VideoMatrixCoefficients::Bt709);
colorSpace.mPrimaries.SetValue(VideoColorPrimaries::Bt709);
colorSpace.mTransfer.SetValue(VideoTransferCharacteristics::Bt709);
break;
case VideoPixelFormat::RGBA:
case VideoPixelFormat::RGBX:
case VideoPixelFormat::BGRA:
case VideoPixelFormat::BGRX:
colorSpace.mFullRange.SetValue(true);
colorSpace.mMatrix.SetValue(VideoMatrixCoefficients::Rgb);
colorSpace.mPrimaries.SetValue(VideoColorPrimaries::Bt709);
colorSpace.mTransfer.SetValue(VideoTransferCharacteristics::Iec61966_2_1);
break;
}
return colorSpace;
}
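// In effect, a YUV frame (e.g. NV12) constructed without a color space
// defaults to limited-range BT.709, while an RGB frame defaults to
// full-range sRGB (BT.709 primaries with the IEC 61966-2-1 transfer).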
static Result<std::pair<Maybe<gfx::IntRect>, Maybe<gfx::IntSize>>, nsCString>
ValidateVideoFrameInit(const VideoFrameInit& aInit,
const Maybe<VideoFrame::Format>& aFormat,
const gfx::IntSize& aCodedSize) {
if (aCodedSize.Width() <= 0 || aCodedSize.Height() <= 0) {
return Err("codedWidth and codedHeight must be positive"_ns);
}
Maybe<gfx::IntRect> visibleRect;
if (aInit.mVisibleRect.WasPassed()) {
visibleRect.emplace();
MOZ_TRY_VAR(
visibleRect.ref(),
ToIntRect(aInit.mVisibleRect.Value()).mapErr([](nsCString error) {
error.Insert("visibleRect's ", 0);
return error;
}));
MOZ_TRY(ValidateVisibility(visibleRect.ref(), aCodedSize));
MOZ_TRY(VerifyRectOffsetAlignment(aFormat, visibleRect.ref()));
}
Maybe<gfx::IntSize> displaySize;
MOZ_TRY_VAR(displaySize, MaybeGetDisplaySize(aInit));
return std::make_pair(visibleRect, displaySize);
}
/*
* The followings are helpers to create a VideoFrame from a given buffer
*/
static Result<RefPtr<gfx::DataSourceSurface>, MediaResult> AllocateBGRASurface(
gfx::DataSourceSurface* aSurface) {
MOZ_ASSERT(aSurface);
  // Memory allocation relies on CreateDataSourceSurfaceWithStride, so we
  // still need to do this even when the source format is already
  // SurfaceFormat::BGR{A, X}.
gfx::DataSourceSurface::ScopedMap surfaceMap(aSurface,
gfx::DataSourceSurface::READ);
if (!surfaceMap.IsMapped()) {
return Err(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
"The source surface is not readable"_ns));
}
RefPtr<gfx::DataSourceSurface> bgraSurface =
gfx::Factory::CreateDataSourceSurfaceWithStride(
aSurface->GetSize(), gfx::SurfaceFormat::B8G8R8A8,
surfaceMap.GetStride());
if (!bgraSurface) {
return Err(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
"Failed to allocate a BGRA surface"_ns));
}
gfx::DataSourceSurface::ScopedMap bgraMap(bgraSurface,
gfx::DataSourceSurface::WRITE);
if (!bgraMap.IsMapped()) {
return Err(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
"The allocated BGRA surface is not writable"_ns));
}
gfx::SwizzleData(surfaceMap.GetData(), surfaceMap.GetStride(),
aSurface->GetFormat(), bgraMap.GetData(),
bgraMap.GetStride(), bgraSurface->GetFormat(),
bgraSurface->GetSize());
return bgraSurface;
}
static Result<RefPtr<layers::Image>, MediaResult> CreateImageFromRawData(
const gfx::IntSize& aSize, int32_t aStride, gfx::SurfaceFormat aFormat,
const Span<uint8_t>& aBuffer) {
MOZ_ASSERT(!aSize.IsEmpty());
// Wrap the source buffer into a DataSourceSurface.
RefPtr<gfx::DataSourceSurface> surface =
gfx::Factory::CreateWrappingDataSourceSurface(aBuffer.data(), aStride,
aSize, aFormat);
if (!surface) {
return Err(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
"Failed to wrap the raw data into a surface"_ns));
}
  // Gecko favors BGRA, so we convert the surface into BGRA format first.
RefPtr<gfx::DataSourceSurface> bgraSurface;
MOZ_TRY_VAR(bgraSurface, AllocateBGRASurface(surface));
MOZ_ASSERT(bgraSurface);
return RefPtr<layers::Image>(
new layers::SourceSurfaceImage(bgraSurface.get()));
}
static Result<RefPtr<layers::Image>, MediaResult> CreateRGBAImageFromBuffer(
const VideoFrame::Format& aFormat, const gfx::IntSize& aSize,
const Span<uint8_t>& aBuffer) {
const gfx::SurfaceFormat format = aFormat.ToSurfaceFormat();
MOZ_ASSERT(format == gfx::SurfaceFormat::R8G8B8A8 ||
format == gfx::SurfaceFormat::R8G8B8X8 ||
format == gfx::SurfaceFormat::B8G8R8A8 ||
format == gfx::SurfaceFormat::B8G8R8X8);
// TODO: Use aFormat.SampleBytes() instead?
CheckedInt<int32_t> stride(BytesPerPixel(format));
stride *= aSize.Width();
if (!stride.isValid()) {
return Err(MediaResult(NS_ERROR_INVALID_ARG,
"Image size exceeds implementation's limit"_ns));
}
return CreateImageFromRawData(aSize, stride.value(), format, aBuffer);
}
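// For example, a 2x2 RGBA buffer uses a stride of 2 * 4 = 8 bytes: the 16
// source bytes are wrapped without copying and then swizzled into a freshly
// allocated BGRA surface by the helpers above.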
static Result<RefPtr<layers::Image>, MediaResult> CreateYUVImageFromBuffer(
const VideoFrame::Format& aFormat, const VideoColorSpaceInit& aColorSpace,
const gfx::IntSize& aSize, const Span<uint8_t>& aBuffer) {
if (aFormat.PixelFormat() == VideoPixelFormat::I420 ||
aFormat.PixelFormat() == VideoPixelFormat::I420A) {
UniquePtr<I420BufferReader> reader;
if (aFormat.PixelFormat() == VideoPixelFormat::I420) {
reader.reset(
new I420BufferReader(aBuffer, aSize.Width(), aSize.Height()));
} else {
reader.reset(
new I420ABufferReader(aBuffer, aSize.Width(), aSize.Height()));
}
layers::PlanarYCbCrData data;
data.mPictureRect = gfx::IntRect(0, 0, reader->mWidth, reader->mHeight);
// Y plane.
data.mYChannel = const_cast<uint8_t*>(reader->DataY());
data.mYStride = reader->mStrideY;
data.mYSkip = 0;
// Cb plane.
data.mCbChannel = const_cast<uint8_t*>(reader->DataU());
data.mCbSkip = 0;
// Cr plane.
data.mCrChannel = const_cast<uint8_t*>(reader->DataV());
    data.mCrSkip = 0;
// A plane.
if (aFormat.PixelFormat() == VideoPixelFormat::I420A) {
data.mAlpha.emplace();
data.mAlpha->mChannel =
const_cast<uint8_t*>(reader->AsI420ABufferReader()->DataA());
data.mAlpha->mSize = data.mPictureRect.Size();
// No values for mDepth and mPremultiplied.
}
// CbCr plane vector.
MOZ_RELEASE_ASSERT(reader->mStrideU == reader->mStrideV);
data.mCbCrStride = reader->mStrideU;
data.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
// Color settings.
if (!aColorSpace.mFullRange.IsNull()) {
data.mColorRange = ToColorRange(aColorSpace.mFullRange.Value());
}
MOZ_RELEASE_ASSERT(!aColorSpace.mMatrix.IsNull());
data.mYUVColorSpace = ToColorSpace(aColorSpace.mMatrix.Value());
if (!aColorSpace.mTransfer.IsNull()) {
data.mTransferFunction =
ToTransferFunction(aColorSpace.mTransfer.Value());
}
if (!aColorSpace.mPrimaries.IsNull()) {
data.mColorPrimaries = ToPrimaries(aColorSpace.mPrimaries.Value());
}
RefPtr<layers::PlanarYCbCrImage> image =
new layers::RecyclingPlanarYCbCrImage(new layers::BufferRecycleBin());
nsresult r = image->CopyData(data);
if (NS_FAILED(r)) {
return Err(MediaResult(
r,
nsPrintfCString(
"Failed to create I420%s image",
(aFormat.PixelFormat() == VideoPixelFormat::I420A ? "A" : ""))));
}
// Manually cast type to make Result work.
return RefPtr<layers::Image>(image.forget());
}
if (aFormat.PixelFormat() == VideoPixelFormat::NV12) {
NV12BufferReader reader(aBuffer, aSize.Width(), aSize.Height());
layers::PlanarYCbCrData data;
data.mPictureRect = gfx::IntRect(0, 0, reader.mWidth, reader.mHeight);
// Y plane.
data.mYChannel = const_cast<uint8_t*>(reader.DataY());
data.mYStride = reader.mStrideY;
data.mYSkip = 0;
// Cb plane.
data.mCbChannel = const_cast<uint8_t*>(reader.DataUV());
data.mCbSkip = 1;
// Cr plane.
data.mCrChannel = data.mCbChannel + 1;
data.mCrSkip = 1;
// CbCr plane vector.
data.mCbCrStride = reader.mStrideUV;
data.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
// Color settings.
if (!aColorSpace.mFullRange.IsNull()) {
data.mColorRange = ToColorRange(aColorSpace.mFullRange.Value());
}
MOZ_RELEASE_ASSERT(!aColorSpace.mMatrix.IsNull());
data.mYUVColorSpace = ToColorSpace(aColorSpace.mMatrix.Value());
if (!aColorSpace.mTransfer.IsNull()) {
data.mTransferFunction =
ToTransferFunction(aColorSpace.mTransfer.Value());
}
if (!aColorSpace.mPrimaries.IsNull()) {
data.mColorPrimaries = ToPrimaries(aColorSpace.mPrimaries.Value());
}
RefPtr<layers::NVImage> image = new layers::NVImage();
nsresult r = image->SetData(data);
if (NS_FAILED(r)) {
return Err(MediaResult(r, "Failed to create NV12 image"_ns));
}
// Manually cast type to make Result work.
return RefPtr<layers::Image>(image.forget());
}
return Err(MediaResult(
NS_ERROR_DOM_NOT_SUPPORTED_ERR,
nsPrintfCString("%s is unsupported",
dom::GetEnumString(aFormat.PixelFormat()).get())));
}
static Result<RefPtr<layers::Image>, MediaResult> CreateImageFromBuffer(
const VideoFrame::Format& aFormat, const VideoColorSpaceInit& aColorSpace,
const gfx::IntSize& aSize, const Span<uint8_t>& aBuffer) {
switch (aFormat.PixelFormat()) {
case VideoPixelFormat::I420:
case VideoPixelFormat::I420A:
case VideoPixelFormat::NV12:
return CreateYUVImageFromBuffer(aFormat, aColorSpace, aSize, aBuffer);
case VideoPixelFormat::I420P10:
case VideoPixelFormat::I420P12:
case VideoPixelFormat::I420AP10:
case VideoPixelFormat::I420AP12:
case VideoPixelFormat::I422:
case VideoPixelFormat::I422P10:
case VideoPixelFormat::I422P12:
case VideoPixelFormat::I422A:
case VideoPixelFormat::I422AP10:
case VideoPixelFormat::I422AP12:
case VideoPixelFormat::I444:
case VideoPixelFormat::I444P10:
case VideoPixelFormat::I444P12:
case VideoPixelFormat::I444A:
case VideoPixelFormat::I444AP10:
case VideoPixelFormat::I444AP12:
      // Not supported yet.
break;
case VideoPixelFormat::RGBA:
case VideoPixelFormat::RGBX:
case VideoPixelFormat::BGRA:
case VideoPixelFormat::BGRX:
return CreateRGBAImageFromBuffer(aFormat, aSize, aBuffer);
}
return Err(MediaResult(
NS_ERROR_DOM_NOT_SUPPORTED_ERR,
nsPrintfCString("%s is unsupported",
dom::GetEnumString(aFormat.PixelFormat()).get())));
}
template <class T>
static Result<RefPtr<VideoFrame>, MediaResult> CreateVideoFrameFromBuffer(
nsIGlobalObject* aGlobal, const T& aBuffer,
const VideoFrameBufferInit& aInit) {
if (aInit.mColorSpace.WasPassed() &&
!aInit.mColorSpace.Value().mTransfer.IsNull() &&
aInit.mColorSpace.Value().mTransfer.Value() ==
VideoTransferCharacteristics::Linear) {
return Err(MediaResult(NS_ERROR_DOM_NOT_SUPPORTED_ERR,
"linear RGB is not supported"_ns));
}
std::tuple<gfx::IntSize, Maybe<gfx::IntRect>, Maybe<gfx::IntSize>> init;
MOZ_TRY_VAR(init,
ValidateVideoFrameBufferInit(aInit).mapErr([](nsCString error) {
return MediaResult(NS_ERROR_INVALID_ARG, error);
}));
gfx::IntSize codedSize = std::get<0>(init);
Maybe<gfx::IntRect> visibleRect = std::get<1>(init);
Maybe<gfx::IntSize> displaySize = std::get<2>(init);
VideoFrame::Format format(aInit.mFormat);
  // TODO: The spec doesn't ask for this in the ctor, but the pixel-format
  // definition does. See
// This comment should be removed once the issue is resolved.
if (!format.IsValidSize(codedSize)) {
return Err(MediaResult(NS_ERROR_INVALID_ARG,
"coded width and/or height is invalid"_ns));
}
gfx::IntRect parsedRect;
MOZ_TRY_VAR(parsedRect, ParseVisibleRect(gfx::IntRect({0, 0}, codedSize),
visibleRect, codedSize, format)
.mapErr([](nsCString error) {
return MediaResult(NS_ERROR_INVALID_ARG, error);
}));
const Sequence<PlaneLayout>* optLayout = OptionalToPointer(aInit.mLayout);
CombinedBufferLayout combinedLayout;
MOZ_TRY_VAR(combinedLayout,
ComputeLayoutAndAllocationSize(parsedRect, format, optLayout)
.mapErr([](nsCString error) {
return MediaResult(NS_ERROR_INVALID_ARG, error);
}));
Maybe<uint64_t> duration = OptionalToMaybe(aInit.mDuration);
VideoColorSpaceInit colorSpace =
PickColorSpace(OptionalToPointer(aInit.mColorSpace), aInit.mFormat);
RefPtr<layers::Image> data;
MOZ_TRY_VAR(
data,
aBuffer.ProcessFixedData(
[&](const Span<uint8_t>& aData)
-> Result<RefPtr<layers::Image>, MediaResult> {
if (aData.Length() <
static_cast<size_t>(combinedLayout.mAllocationSize)) {
return Err(
MediaResult(NS_ERROR_INVALID_ARG, "data is too small"_ns));
}
          // TODO: If codedSize is (3, 3) and visibleRect is (0, 0, 1, 1) but
          // the data is a 2 x 2 RGBA buffer (2 x 2 x 4 bytes), it passes the
          // above check. In this case, we can crop it to a 1 x 1-codedSize
          // image (Bug 1782128).
if (aData.Length() < format.ByteCount(codedSize)) {
return Err(
MediaResult(NS_ERROR_INVALID_ARG, "data is too small"_ns));
}
return CreateImageFromBuffer(format, colorSpace, codedSize, aData);
}));
MOZ_ASSERT(data);
MOZ_ASSERT(data->GetSize() == codedSize);
  // By spec, we should set visible* here. But since we don't change the
  // image, visible* is the same as parsedRect here. The display{Width,
  // Height} falls back to visible{Width, Height} if it's not set.
  // TODO: The spec should assign aInit.mFormat to the inner format value:
  // This comment should be removed once the issue is resolved.
return MakeRefPtr<VideoFrame>(aGlobal, data, Some(aInit.mFormat), codedSize,
parsedRect,
displaySize ? *displaySize : parsedRect.Size(),
duration, aInit.mTimestamp, colorSpace);
}
template <class T>
static already_AddRefed<VideoFrame> CreateVideoFrameFromBuffer(
const GlobalObject& aGlobal, const T& aBuffer,
const VideoFrameBufferInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
auto r = CreateVideoFrameFromBuffer(global, aBuffer, aInit);
if (r.isErr()) {
MediaResult err = r.unwrapErr();
if (err.Code() == NS_ERROR_DOM_NOT_SUPPORTED_ERR) {
aRv.ThrowNotSupportedError(err.Message());
} else {
aRv.ThrowTypeError(err.Message());
}
return nullptr;
}
return r.unwrap().forget();
}
static void InitializeVisibleRectAndDisplaySize(
Maybe<gfx::IntRect>& aVisibleRect, Maybe<gfx::IntSize>& aDisplaySize,
gfx::IntRect aDefaultVisibleRect, gfx::IntSize aDefaultDisplaySize) {
if (!aVisibleRect) {
aVisibleRect.emplace(aDefaultVisibleRect);
}
if (!aDisplaySize) {
double wScale = static_cast<double>(aDefaultDisplaySize.Width()) /
aDefaultVisibleRect.Width();
double hScale = static_cast<double>(aDefaultDisplaySize.Height()) /
aDefaultVisibleRect.Height();
double w = wScale * aVisibleRect->Width();
double h = hScale * aVisibleRect->Height();
aDisplaySize.emplace(gfx::IntSize(static_cast<uint32_t>(round(w)),
static_cast<uint32_t>(round(h))));
}
}
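// For example, with a default visible rect and display size of 1920x1080, an
// overridden visible rect of 960x540 yields a display size of 960x540: the
// default 1.0 width/height scales are applied to the overridden rect.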
static Result<already_AddRefed<VideoFrame>, nsCString>
InitializeFrameWithResourceAndSize(
nsIGlobalObject* aGlobal, const VideoFrameInit& aInit,
already_AddRefed<layers::SourceSurfaceImage> aImage) {
MOZ_ASSERT(aInit.mTimestamp.WasPassed());
RefPtr<layers::SourceSurfaceImage> image(aImage);
MOZ_ASSERT(image);
RefPtr<gfx::SourceSurface> surface = image->GetAsSourceSurface();
Maybe<VideoFrame::Format> format =
SurfaceFormatToVideoPixelFormat(surface->GetFormat())
.map([](const VideoPixelFormat& aFormat) {
return VideoFrame::Format(aFormat);
});
std::pair<Maybe<gfx::IntRect>, Maybe<gfx::IntSize>> init;
MOZ_TRY_VAR(init, ValidateVideoFrameInit(aInit, format, image->GetSize()));
Maybe<gfx::IntRect> visibleRect = init.first;
Maybe<gfx::IntSize> displaySize = init.second;
if (format && aInit.mAlpha == AlphaOption::Discard) {
format->MakeOpaque();
    // Keep the alpha data in the image for now, until it is rendered.
    // TODO: The alpha will still be rendered if the format is unrecognized,
    // since no additional flag keeps track of this request. Should the spec
    // address what to do in this case?
}
InitializeVisibleRectAndDisplaySize(visibleRect, displaySize,
gfx::IntRect({0, 0}, image->GetSize()),
image->GetSize());
Maybe<uint64_t> duration = OptionalToMaybe(aInit.mDuration);
  VideoColorSpaceInit colorSpace{};
  // The surface format may be unrecognized, so guard the Maybe before
  // dereferencing it.
  Maybe<VideoPixelFormat> pixelFormat =
      SurfaceFormatToVideoPixelFormat(surface->GetFormat());
  if (pixelFormat && IsYUVFormat(*pixelFormat)) {
colorSpace = FallbackColorSpaceForVideoContent();
} else {
colorSpace = FallbackColorSpaceForWebContent();
}
return MakeAndAddRef<VideoFrame>(
aGlobal, image, format ? Some(format->PixelFormat()) : Nothing(),
image->GetSize(), visibleRect.value(), displaySize.value(), duration,
aInit.mTimestamp.Value(), colorSpace);
}
static Result<already_AddRefed<VideoFrame>, nsCString>
InitializeFrameFromOtherFrame(nsIGlobalObject* aGlobal, VideoFrameData&& aData,
const VideoFrameInit& aInit) {
MOZ_ASSERT(aGlobal);
MOZ_ASSERT(aData.mImage);
Maybe<VideoFrame::Format> format =
aData.mFormat ? Some(VideoFrame::Format(*aData.mFormat)) : Nothing();
if (format && aInit.mAlpha == AlphaOption::Discard) {
format->MakeOpaque();
    // Keep the alpha data in the image for now, until it is rendered.
    // TODO: The alpha will still be rendered if the format is unrecognized,
    // since no additional flag keeps track of this request. Should the spec
    // address what to do in this case?
}
std::pair<Maybe<gfx::IntRect>, Maybe<gfx::IntSize>> init;
MOZ_TRY_VAR(init,
ValidateVideoFrameInit(aInit, format, aData.mImage->GetSize()));
Maybe<gfx::IntRect> visibleRect = init.first;
Maybe<gfx::IntSize> displaySize = init.second;
InitializeVisibleRectAndDisplaySize(visibleRect, displaySize,
aData.mVisibleRect, aData.mDisplaySize);
Maybe<uint64_t> duration = OptionalToMaybe(aInit.mDuration);
int64_t timestamp = aInit.mTimestamp.WasPassed() ? aInit.mTimestamp.Value()
: aData.mTimestamp;
return MakeAndAddRef<VideoFrame>(
aGlobal, aData.mImage, format ? Some(format->PixelFormat()) : Nothing(),
aData.mImage->GetSize(), *visibleRect, *displaySize, duration, timestamp,
aData.mColorSpace);
}
/*
* Helper classes carrying VideoFrame data
*/
VideoFrameData::VideoFrameData(layers::Image* aImage,
const Maybe<VideoPixelFormat>& aFormat,
gfx::IntRect aVisibleRect,
gfx::IntSize aDisplaySize,
Maybe<uint64_t> aDuration, int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace)
: mImage(aImage),
mFormat(aFormat),
mVisibleRect(aVisibleRect),
mDisplaySize(aDisplaySize),
mDuration(aDuration),
mTimestamp(aTimestamp),
mColorSpace(aColorSpace) {}
VideoFrameSerializedData::VideoFrameSerializedData(const VideoFrameData& aData,
gfx::IntSize aCodedSize)
: VideoFrameData(aData), mCodedSize(aCodedSize) {}
/*
* W3C Webcodecs VideoFrame implementation
*/
VideoFrame::VideoFrame(nsIGlobalObject* aParent,
const RefPtr<layers::Image>& aImage,
const Maybe<VideoPixelFormat>& aFormat,
gfx::IntSize aCodedSize, gfx::IntRect aVisibleRect,
gfx::IntSize aDisplaySize,
const Maybe<uint64_t>& aDuration, int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace)
: mParent(aParent),
mCodedSize(aCodedSize),
mVisibleRect(aVisibleRect),
mDisplaySize(aDisplaySize),
mDuration(aDuration),
mTimestamp(aTimestamp),
mColorSpace(aColorSpace) {
MOZ_ASSERT(mParent);
LOG("VideoFrame %p ctor", this);
mResource.emplace(
Resource(aImage, aFormat.map([](const VideoPixelFormat& aPixelFormat) {
return VideoFrame::Format(aPixelFormat);
})));
if (!mResource->mFormat) {
LOGW("Create a VideoFrame with an unrecognized image format");
}
StartAutoClose();
}
VideoFrame::VideoFrame(nsIGlobalObject* aParent,
const VideoFrameSerializedData& aData)
: mParent(aParent),
mCodedSize(aData.mCodedSize),
mVisibleRect(aData.mVisibleRect),
mDisplaySize(aData.mDisplaySize),
mDuration(aData.mDuration),
mTimestamp(aData.mTimestamp),
mColorSpace(aData.mColorSpace) {
MOZ_ASSERT(mParent);
LOG("VideoFrame %p ctor", this);
mResource.emplace(Resource(
aData.mImage, aData.mFormat.map([](const VideoPixelFormat& aPixelFormat) {
return VideoFrame::Format(aPixelFormat);
})));
if (!mResource->mFormat) {
LOGW("Create a VideoFrame with an unrecognized image format");
}
StartAutoClose();
}
VideoFrame::VideoFrame(const VideoFrame& aOther)
: mParent(aOther.mParent),
mResource(aOther.mResource),
mCodedSize(aOther.mCodedSize),
mVisibleRect(aOther.mVisibleRect),
mDisplaySize(aOther.mDisplaySize),
mDuration(aOther.mDuration),
mTimestamp(aOther.mTimestamp),
mColorSpace(aOther.mColorSpace) {
MOZ_ASSERT(mParent);
LOG("VideoFrame %p ctor", this);
StartAutoClose();
}
VideoFrame::~VideoFrame() {
MOZ_ASSERT(IsClosed());
LOG("VideoFrame %p dtor", this);
}
nsIGlobalObject* VideoFrame::GetParentObject() const {
AssertIsOnOwningThread();
return mParent.get();
}
JSObject* VideoFrame::WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) {
AssertIsOnOwningThread();
return VideoFrame_Binding::Wrap(aCx, this, aGivenProto);
}
// The following constructors are defined in the WebCodecs spec.
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, HTMLImageElement& aImageElement,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
// Check the usability.
if (aImageElement.State().HasState(ElementState::BROKEN)) {
aRv.ThrowInvalidStateError("The image's state is broken");
return nullptr;
}
if (!aImageElement.Complete()) {
aRv.ThrowInvalidStateError("The image is not completely loaded yet");
return nullptr;
}
if (aImageElement.NaturalWidth() == 0) {
aRv.ThrowInvalidStateError("The image has a width of 0");
return nullptr;
}
if (aImageElement.NaturalHeight() == 0) {
aRv.ThrowInvalidStateError("The image has a height of 0");
return nullptr;
}
// If the origin of HTMLImageElement's image data is not same origin with the
// entry settings object's origin, then throw a SecurityError DOMException.
SurfaceFromElementResult res = nsLayoutUtils::SurfaceFromElement(
&aImageElement, nsLayoutUtils::SFE_WANT_FIRST_FRAME_IF_IMAGE);
if (res.mIsWriteOnly) {
// Being write-only implies its image is cross-origin w/out CORS headers.
aRv.ThrowSecurityError("The image is not same-origin");
return nullptr;
}
RefPtr<gfx::SourceSurface> surface = res.GetSourceSurface();
if (NS_WARN_IF(!surface)) {
aRv.ThrowInvalidStateError("The image's surface acquisition failed");
return nullptr;
}
if (!aInit.mTimestamp.WasPassed()) {
aRv.ThrowTypeError("Missing timestamp");
return nullptr;
}
RefPtr<layers::SourceSurfaceImage> image =
new layers::SourceSurfaceImage(surface.get());
auto r = InitializeFrameWithResourceAndSize(global, aInit, image.forget());
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, SVGImageElement& aSVGImageElement,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
// Check the usability.
if (aSVGImageElement.State().HasState(ElementState::BROKEN)) {
aRv.ThrowInvalidStateError("The SVG's state is broken");
return nullptr;
}
// If the origin of SVGImageElement's image data is not same origin with the
// entry settings object's origin, then throw a SecurityError DOMException.
SurfaceFromElementResult res = nsLayoutUtils::SurfaceFromElement(
&aSVGImageElement, nsLayoutUtils::SFE_WANT_FIRST_FRAME_IF_IMAGE);
if (res.mIsWriteOnly) {
// Being write-only implies its image is cross-origin w/out CORS headers.
aRv.ThrowSecurityError("The SVG is not same-origin");
return nullptr;
}
RefPtr<gfx::SourceSurface> surface = res.GetSourceSurface();
if (NS_WARN_IF(!surface)) {
aRv.ThrowInvalidStateError("The SVG's surface acquisition failed");
return nullptr;
}
if (!aInit.mTimestamp.WasPassed()) {
aRv.ThrowTypeError("Missing timestamp");
return nullptr;
}
RefPtr<layers::SourceSurfaceImage> image =
new layers::SourceSurfaceImage(surface.get());
auto r = InitializeFrameWithResourceAndSize(global, aInit, image.forget());
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, HTMLCanvasElement& aCanvasElement,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
// Check the usability.
if (aCanvasElement.Width() == 0) {
aRv.ThrowInvalidStateError("The canvas has a width of 0");
return nullptr;
}
if (aCanvasElement.Height() == 0) {
aRv.ThrowInvalidStateError("The canvas has a height of 0");
return nullptr;
}
// If the origin of HTMLCanvasElement's image data is not same origin with the
// entry settings object's origin, then throw a SecurityError DOMException.
SurfaceFromElementResult res = nsLayoutUtils::SurfaceFromElement(
&aCanvasElement, nsLayoutUtils::SFE_WANT_FIRST_FRAME_IF_IMAGE);
if (res.mIsWriteOnly) {
// Being write-only implies its image is cross-origin w/out CORS headers.
aRv.ThrowSecurityError("The canvas is not same-origin");
return nullptr;
}
RefPtr<gfx::SourceSurface> surface = res.GetSourceSurface();
if (NS_WARN_IF(!surface)) {
aRv.ThrowInvalidStateError("The canvas' surface acquisition failed");
return nullptr;
}
if (!aInit.mTimestamp.WasPassed()) {
aRv.ThrowTypeError("Missing timestamp");
return nullptr;
}
RefPtr<layers::SourceSurfaceImage> image =
new layers::SourceSurfaceImage(surface.get());
auto r = InitializeFrameWithResourceAndSize(global, aInit, image.forget());
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, HTMLVideoElement& aVideoElement,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
aVideoElement.LogVisibility(
mozilla::dom::HTMLVideoElement::CallerAPI::CREATE_VIDEOFRAME);
// Check the usability.
if (aVideoElement.NetworkState() == HTMLMediaElement_Binding::NETWORK_EMPTY) {
aRv.ThrowInvalidStateError("The video has not been initialized yet");
return nullptr;
}
if (aVideoElement.ReadyState() <= HTMLMediaElement_Binding::HAVE_METADATA) {
aRv.ThrowInvalidStateError("The video is not ready yet");
return nullptr;
}
RefPtr<layers::Image> image = aVideoElement.GetCurrentImage();
if (!image) {
aRv.ThrowInvalidStateError("The video doesn't have any image yet");
return nullptr;
}
// If the origin of HTMLVideoElement's image data is not same origin with the
// entry settings object's origin, then throw a SecurityError DOMException.
if (!IsSameOrigin(global.get(), aVideoElement)) {
aRv.ThrowSecurityError("The video is not same-origin");
return nullptr;
}
const ImageUtils imageUtils(image);
Maybe<dom::ImageBitmapFormat> f = imageUtils.GetFormat();
Maybe<VideoPixelFormat> format =
f.isSome() ? ImageBitmapFormatToVideoPixelFormat(f.value()) : Nothing();
  // TODO: Retrieve/infer the duration and colorspace.
auto r = InitializeFrameFromOtherFrame(
global.get(),
VideoFrameData(image.get(), format, image->GetPictureRect(),
image->GetSize(), Nothing(),
static_cast<int64_t>(aVideoElement.CurrentTime()), {}),
aInit);
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, OffscreenCanvas& aOffscreenCanvas,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
// Check the usability.
if (aOffscreenCanvas.Width() == 0) {
aRv.ThrowInvalidStateError("The canvas has a width of 0");
return nullptr;
}
if (aOffscreenCanvas.Height() == 0) {
aRv.ThrowInvalidStateError("The canvas has a height of 0");
return nullptr;
}
// If the origin of the OffscreenCanvas's image data is not same origin with
// the entry settings object's origin, then throw a SecurityError
// DOMException.
SurfaceFromElementResult res = nsLayoutUtils::SurfaceFromOffscreenCanvas(
&aOffscreenCanvas, nsLayoutUtils::SFE_WANT_FIRST_FRAME_IF_IMAGE);
if (res.mIsWriteOnly) {
// Being write-only implies its image is cross-origin w/out CORS headers.
aRv.ThrowSecurityError("The canvas is not same-origin");
return nullptr;
}
RefPtr<gfx::SourceSurface> surface = res.GetSourceSurface();
if (NS_WARN_IF(!surface)) {
aRv.ThrowInvalidStateError("The canvas' surface acquisition failed");
return nullptr;
}
if (!aInit.mTimestamp.WasPassed()) {
aRv.ThrowTypeError("Missing timestamp");
return nullptr;
}
RefPtr<layers::SourceSurfaceImage> image =
new layers::SourceSurfaceImage(surface.get());
auto r = InitializeFrameWithResourceAndSize(global, aInit, image.forget());
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, ImageBitmap& aImageBitmap,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
// Check the usability.
UniquePtr<ImageBitmapCloneData> data = aImageBitmap.ToCloneData();
if (!data || !data->mSurface) {
aRv.ThrowInvalidStateError(
"The ImageBitmap is closed or its surface acquisition failed");
return nullptr;
}
// If the origin of the ImageBitmap's image data is not same origin with the
// entry settings object's origin, then throw a SecurityError DOMException.
if (data->mWriteOnly) {
// Being write-only implies its image is cross-origin w/out CORS headers.
aRv.ThrowSecurityError("The ImageBitmap is not same-origin");
return nullptr;
}
if (!aInit.mTimestamp.WasPassed()) {
aRv.ThrowTypeError("Missing timestamp");
return nullptr;
}
RefPtr<layers::SourceSurfaceImage> image =
new layers::SourceSurfaceImage(data->mSurface.get());
// TODO: Take care of data->mAlphaType
auto r = InitializeFrameWithResourceAndSize(global, aInit, image.forget());
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, VideoFrame& aVideoFrame,
const VideoFrameInit& aInit, ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
// Check the usability.
if (!aVideoFrame.mResource) {
aRv.ThrowInvalidStateError(
"The VideoFrame is closed or no image found there");
return nullptr;
}
MOZ_ASSERT(aVideoFrame.mResource->mImage->GetSize() ==
aVideoFrame.mCodedSize);
MOZ_ASSERT(!aVideoFrame.mCodedSize.IsEmpty());
MOZ_ASSERT(!aVideoFrame.mVisibleRect.IsEmpty());
MOZ_ASSERT(!aVideoFrame.mDisplaySize.IsEmpty());
// If the origin of the VideoFrame is not same origin with the entry settings
// object's origin, then throw a SecurityError DOMException.
if (!IsSameOrigin(global.get(), aVideoFrame)) {
aRv.ThrowSecurityError("The VideoFrame is not same-origin");
return nullptr;
}
auto r = InitializeFrameFromOtherFrame(
global.get(), aVideoFrame.GetVideoFrameData(), aInit);
if (r.isErr()) {
aRv.ThrowTypeError(r.unwrapErr());
return nullptr;
}
return r.unwrap();
}
// The following constructors are defined in the WebCodecs spec.
/* static */
already_AddRefed<VideoFrame> VideoFrame::Constructor(
const GlobalObject& aGlobal, const ArrayBufferView& aBufferView,
const VideoFrameBufferInit& aInit, ErrorResult&