<!DOCTYPE html>
<html>
<head>
<title>RTCPeerConnection Insertable Streams Audio</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/resources/testdriver.js"></script>
<script src="/resources/testdriver-vendor.js"></script>
<script src="../../mediacapture-streams/permission-helper.js"></script>
<script src="../../webrtc/RTCPeerConnection-helper.js"></script>
<script src="./RTCPeerConnection-insertable-streams.js"></script>
</head>
<body>
<script>
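// Sets up a caller/callee pair, attaches encoded insertable streams to the
// audio sender, forwards a fixed number of frames in three phases
// (unmodified, with replaced data, with modified data), and resolves once the
// receiver has observed all of them. perFrameCallback is invoked on each
// frame before it is written back to the sender's writable stream.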
async function testAudioFlow(t, negotiationFunction, setConstructorParam, perFrameCallback = () => {}) {
  const caller = new RTCPeerConnection(setConstructorParam ? {encodedInsertableStreams:true} : {});
  t.add_cleanup(() => caller.close());
  const callee = new RTCPeerConnection(setConstructorParam ? {encodedInsertableStreams:true} : {});
  t.add_cleanup(() => callee.close());
  await setMediaPermission("granted", ["microphone"]);
  const stream = await navigator.mediaDevices.getUserMedia({audio:true});
  const audioTrack = stream.getAudioTracks()[0];
  t.add_cleanup(() => audioTrack.stop());
  const audioSender = caller.addTrack(audioTrack);
  const senderStreams = audioSender.createEncodedStreams();
  const senderReader = senderStreams.readable.getReader();
  const senderWriter = senderStreams.writable.getWriter();
  const frameInfos = [];
  const numFramesPassthrough = 5;
  const numFramesReplaceData = 5;
  const numFramesModifyData = 5;
  const numFramesToSend = numFramesPassthrough + numFramesReplaceData + numFramesModifyData;
  let streamsCreatedAtNegotiation;
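  // On the callee side, read each received encoded frame and check it against
  // the corresponding entry recorded in frameInfos by the sender; resolve once
  // all numFramesToSend frames have been verified.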
  const ontrackPromise = new Promise(resolve => {
    callee.ontrack = t.step_func(() => {
      const audioReceiver = callee.getReceivers().find(r => r.track.kind === 'audio');
      assert_not_equals(audioReceiver, undefined);
      let receiverReader;
      let receiverWriter;
      if (streamsCreatedAtNegotiation) {
        const audioStreams = streamsCreatedAtNegotiation.find(r => r.kind === 'audio');
        assert_true(!!audioStreams);
        receiverReader = audioStreams.streams.readable.getReader();
        receiverWriter = audioStreams.streams.writable.getWriter();
      } else {
        const receiverStreams = audioReceiver.createEncodedStreams();
        receiverReader = receiverStreams.readable.getReader();
        receiverWriter = receiverStreams.writable.getWriter();
      }
      const maxFramesToReceive = numFramesToSend;
      let numVerifiedFrames = 0;
      for (let i = 0; i < maxFramesToReceive; i++) {
        receiverReader.read().then(t.step_func(result => {
          if (frameInfos[numVerifiedFrames] &&
              areFrameInfosEqual(result.value, frameInfos[numVerifiedFrames])) {
            numVerifiedFrames++;
          } else {
            // Receiving unexpected frames is an indication that frames are not
            // passed correctly between sender and receiver.
            assert_unreached("Incorrect frame received");
          }
          assert_not_equals(result.value.getMetadata().sequenceNumber, undefined);
          if (numVerifiedFrames == numFramesToSend)
            resolve();
        }));
      }
    });
  });
  exchangeIceCandidates(caller, callee);
  await negotiationFunction(caller, callee, (streams) => {streamsCreatedAtNegotiation = streams;});
  // Pass frames as they come from the encoder.
  for (let i = 0; i < numFramesPassthrough; i++) {
    const result = await senderReader.read();
    const frame = result.value;
    perFrameCallback(frame);
    frameInfos.push({
      data: frame.data,
      timestamp: frame.timestamp,
      type: frame.type,
      metadata: frame.getMetadata(),
      getMetadata() { return this.metadata; }
    });
    senderWriter.write(frame);
  }
  // Replace frame data with arbitrary buffers.
  for (let i = 0; i < numFramesReplaceData; i++) {
    const result = await senderReader.read();
    const frame = result.value;
    const buffer = new ArrayBuffer(100);
    const int8View = new Int8Array(buffer);
    int8View.fill(i);
    frame.data = buffer;
    perFrameCallback(frame);
    frameInfos.push({
      data: frame.data,
      timestamp: frame.timestamp,
      type: frame.type,
      metadata: frame.getMetadata(),
      getMetadata() { return this.metadata; }
    });
    senderWriter.write(frame);
  }
  // Modify frame data.
  for (let i = 0; i < numFramesModifyData; i++) {
    const result = await senderReader.read();
    const frame = result.value;
    const int8View = new Int8Array(frame.data);
    int8View.fill(i);
    perFrameCallback(frame);
    frameInfos.push({
      data: frame.data,
      timestamp: frame.timestamp,
      type: frame.type,
      metadata: frame.getMetadata(),
      getMetadata() { return this.metadata; }
    });
    senderWriter.write(frame);
  }
  return ontrackPromise;
}
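// Run the frame-flow test with and without the encodedInsertableStreams
// constructor parameter, and with either endpoint initiating negotiation.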
for (const setConstructorParam of [false, true]) {
  promise_test(async t => {
    return testAudioFlow(t, exchangeOfferAnswer, setConstructorParam);
  }, 'Frames flow correctly using insertable streams' + (setConstructorParam ? ' with param' : ''));
  promise_test(async t => {
    return testAudioFlow(t, exchangeOfferAnswerReverse, setConstructorParam);
  }, 'Frames flow correctly using insertable streams when receiver starts negotiation' + (setConstructorParam ? ' with param' : ''));
}
promise_test(async t => {
  const caller = new RTCPeerConnection({encodedInsertableStreams:true});
  t.add_cleanup(() => caller.close());
  const callee = new RTCPeerConnection();
  t.add_cleanup(() => callee.close());
  const stream = await navigator.mediaDevices.getUserMedia({audio:true});
  const track = stream.getTracks()[0];
  t.add_cleanup(() => track.stop());
  const sender = caller.addTrack(track);
  const streams = sender.createEncodedStreams();
  const transformer = new TransformStream({
    transform(frame, controller) {
      // Inserting the same frame twice will result in failure since the frame
      // will be neutered after the first insertion is processed.
      controller.enqueue(frame);
      controller.enqueue(frame);
    }
  });
  exchangeIceCandidates(caller, callee);
  await exchangeOfferAnswer(caller, callee);
  await promise_rejects_dom(
      t, 'OperationError',
      streams.readable.pipeThrough(transformer).pipeTo(streams.writable));
}, 'Enqueuing the same frame twice fails');
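// createEncodedStreams() may be called at most once per sender; a second call
// is expected to throw InvalidStateError.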
promise_test(async t => {
  const caller = new RTCPeerConnection({encodedInsertableStreams:true});
  t.add_cleanup(() => caller.close());
  const stream = await navigator.mediaDevices.getUserMedia({audio:true});
  const track = stream.getTracks()[0];
  t.add_cleanup(() => track.stop());
  const sender = caller.addTrack(track);
  sender.createEncodedStreams();
  assert_throws_dom("InvalidStateError", () => sender.createEncodedStreams());
}, 'Creating streams twice throws');
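// Clone every outgoing frame with structuredClone() and verify afterwards that
// the clones still carry their data and timestamps, even though the original
// frames were written back to the sender and sent through the PeerConnection.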
promise_test(async t => {
  let clonedFrames = [];
  function verifyFramesSerializeAndDeserialize(frame) {
    // Clone encoded frames using structuredClone (i.e. serialize + deserialize)
    // and keep a reference.
    const clone = structuredClone(frame);
    clonedFrames.push(clone);
  }
  await testAudioFlow(
      t, exchangeOfferAnswer, /*setConstructorParam=*/false, verifyFramesSerializeAndDeserialize);
  // Ensure all of our cloned frames are still alive and well, despite the
  // originals having been sent through the PeerConnection.
  clonedFrames.forEach((clonedFrame) => {
    assert_not_equals(clonedFrame.data.byteLength, 0);
    assert_not_equals(clonedFrame.timestamp, 0);
  });
}, 'Encoded frames serialize and deserialize into a deep clone');
promise_test(async t => {
  function rewriteFrameTimestamps(frame) {
    // Add 1 to the rtp timestamp of the frame.
    const metadata = frame.getMetadata();
    metadata.rtpTimestamp += 1;
    frame.setMetadata(metadata);
    assert_equals(frame.getMetadata().rtpTimestamp, metadata.rtpTimestamp);
  }
  // Run audio flows which will assert that the frames received have the
  // rtp timestamp set by our modification.
  await testAudioFlow(
      t, exchangeOfferAnswer, /*setConstructorParam=*/false, rewriteFrameTimestamps);
}, 'Modifying rtp timestamp');
</script>
</body>
</html>