Revert "Move tab mirroring performance tests into browser_tests."
This reverts commit a94eef1.

Reason for revert: Cast tests are failing on ChromiumOS MSAN:

https://ci.chromium.org/p/chromium/builders/luci.chromium.ci/Linux%20ChromiumOS%20MSan%20Tests/11290

Original change's description:
> Move tab mirroring performance tests into browser_tests.
> 
> This moves TabCapturePerformanceTest and CastV2PerformanceTest into
> browser_tests. By default, they have been changed to run in a quick-
> check mode. When run for performance testing on the perf bots, the
> additional --full-performance-run command line switch will be required.
> 
> The performance_browser_tests isolate config has been changed to run
> browser_tests with the --full-performance-run switch, gtest filters,
> and timeout overrides. Also, the old performance_browser_tests GN test
> target has been turned into a simple group target that groups the
> necessary data dependencies for running on the perf bots.
> 
> This change also includes some changes to allow the tests to complete
> in the absence of tracing data when in quick-check mode. While most
> bots do collect a reasonable amount of tracing data in a few short
> seconds, one or two don't always do so. Unfortunately, we can't justify
> making the CQ run longer to force data collection in all cases.
> 
> Bug: 804191
> Change-Id: I2de6c0b94453981fb0d9015f6b3f0e42f57cc2a6
> Reviewed-on: https://chromium-review.googlesource.com/c/1428399
> Commit-Queue: Yuri Wiitala <miu@chromium.org>
> Reviewed-by: Jochen Eisinger <jochen@chromium.org>
> Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#632366}
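
For context on the quick-check mode described in the quoted change: the pattern is that the test relaxes its data requirements unless the --full-performance-run switch is present, which is what the EXPECT_FOR_PERFORMANCE_RUN macro removed in the diff below implements. A minimal, self-contained sketch of that idea follows; the helper names and thresholds here are illustrative only, not the actual Chromium code (Chromium uses base::CommandLine and gtest macros for this).

// Illustrative sketch only (not the actual Chromium test code).
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

namespace {

// Hypothetical helper; Chromium itself reads switches via base::CommandLine.
bool HasSwitch(const std::vector<std::string>& args, const std::string& name) {
  for (const std::string& arg : args) {
    if (arg == name)
      return true;
  }
  return false;
}

// A full performance run needs enough samples for stable statistics; a
// quick-check (CQ) run only verifies that the plumbing works.
size_t MinSamplesRequired(bool full_performance_run) {
  return full_performance_run ? 100u : 3u;
}

}  // namespace

int main(int argc, char** argv) {
  const std::vector<std::string> args(argv + 1, argv + argc);
  const bool full_run = HasSwitch(args, "--full-performance-run");

  const size_t collected_samples = 5;  // Stand-in for trace-derived data.
  if (collected_samples < MinSamplesRequired(full_run)) {
    if (full_run) {
      std::cerr << "FAIL: not enough samples for a performance run\n";
      return 1;
    }
    std::cerr << "WARNING: allowing failure in quick-check mode\n";
  }
  return 0;
}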

TBR=miu@chromium.org,crouleau@chromium.org,jochen@chromium.org

Change-Id: I4c12b48c4ec82877c06ebb460a08a9944a6f5504
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 804191
Reviewed-on: https://chromium-review.googlesource.com/c/1474282
Reviewed-by: Devlin <rdevlin.cronin@chromium.org>
Commit-Queue: Devlin <rdevlin.cronin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#632412}
rdcronin authored and Commit Bot committed Feb 15, 2019
1 parent e12f7dd commit 744a3a0
Showing 6 changed files with 200 additions and 300 deletions.
150 changes: 52 additions & 98 deletions chrome/browser/extensions/api/cast_streaming/performance_test.cc
@@ -53,27 +53,10 @@ namespace {
// contribute anything toward stable measurements: A brief moment of startup
// "jank" is acceptable, and shutdown may result in missing events (e.g., if
// streaming stops a few frames before capture stops).
constexpr int kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.
constexpr size_t kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.

// Minimum number of events required for a reasonable analysis.
constexpr int kMinDataPointsForFullRun = 100; // 1s of audio, ~5s at 24fps.

// Minimum number of events required for data analysis in a non-performance run.
constexpr int kMinDataPointsForQuickRun = 3;

// A convenience macro to run a gtest expectation in the "full performance run"
// setting, or else a warning that something is not being entirely tested in the
// "CQ run" setting. This is required because the test runs in the CQ may not be
// long enough to collect sufficient tracing data; and, unfortunately, there's
// nothing we can do about that.
#define EXPECT_FOR_PERFORMANCE_RUN(expr) \
do { \
if (is_full_performance_run()) { \
EXPECT_TRUE(expr); \
} else if (!(expr)) { \
LOG(WARNING) << "Allowing failure: " << #expr; \
} \
} while (false)
constexpr size_t kMinDataPoints = 100; // 1 sec of audio, or ~5 sec at 24fps.

enum TestFlags {
kSmallWindow = 1 << 2, // Window size: 1 = 800x600, 0 = 2000x1000
@@ -192,6 +175,7 @@ class MeanAndError {
// If data[x] == x * A + B, then this function returns zero.
// The unit is milliseconds.
static MeanAndError AnalyzeJitter(const std::vector<TimeData>& data) {
CHECK_GT(data.size(), 1UL);
VLOG(0) << "Jitter analysis on " << data.size() << " values.";
std::vector<double> deltas;
double sum = 0.0;
@@ -201,14 +185,9 @@ static MeanAndError AnalyzeJitter(const std::vector<TimeData>& data) {
deltas.push_back(delta);
sum += delta;
}
if (deltas.empty()) {
// Not enough data. Don't do the following calculation, to avoid a
// divide-by-zero.
} else {
double mean = sum / deltas.size();
for (size_t i = 0; i < deltas.size(); i++) {
deltas[i] = fabs(mean - deltas[i]);
}
double mean = sum / deltas.size();
for (size_t i = 0; i < deltas.size(); i++) {
deltas[i] = fabs(mean - deltas[i]);
}

return MeanAndError(deltas);
@@ -220,29 +199,23 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
public:
explicit TestPatternReceiver(
const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
const net::IPEndPoint& local_end_point,
bool is_full_performance_run)
const net::IPEndPoint& local_end_point)
: InProcessReceiver(
cast_environment,
local_end_point,
net::IPEndPoint(),
WithAesKeyAndIvSet(media::cast::GetDefaultAudioReceiverConfig()),
WithAesKeyAndIvSet(media::cast::GetDefaultVideoReceiverConfig())),
is_full_performance_run_(is_full_performance_run) {}
WithAesKeyAndIvSet(media::cast::GetDefaultVideoReceiverConfig())) {}

typedef std::map<uint16_t, base::TimeTicks> TimeMap;

bool is_full_performance_run() const { return is_full_performance_run_; }

// Build a map from frame ID (as encoded in the audio and video data)
// to the rtp timestamp for that frame. Note that there will be multiple
// audio frames which all have the same frame ID. When that happens we
// want the minimum rtp timestamp, because that audio frame is supposed
// to play at the same time that the corresponding image is presented.
void MapFrameTimes(const std::vector<TimeData>& events, TimeMap* map) {
const int trim_count = is_full_performance_run_ ? kTrimEvents : 0;
for (int i = trim_count; i < static_cast<int>(events.size()) - trim_count;
i++) {
for (size_t i = kTrimEvents; i < events.size() - kTrimEvents; i++) {
base::TimeTicks& frame_tick = (*map)[events[i].frame_no];
if (frame_tick.is_null()) {
frame_tick = events[i].playout_time;
@@ -259,14 +232,9 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// 1/30s of "2", etc.
TimeMap audio_frame_times, video_frame_times;
MapFrameTimes(audio_events_, &audio_frame_times);
const int min_data_points = is_full_performance_run_
? kMinDataPointsForFullRun
: kMinDataPointsForQuickRun;
EXPECT_FOR_PERFORMANCE_RUN(min_data_points <=
static_cast<int>(audio_frame_times.size()));
EXPECT_GE(audio_frame_times.size(), kMinDataPoints);
MapFrameTimes(video_events_, &video_frame_times);
EXPECT_FOR_PERFORMANCE_RUN(min_data_points <=
static_cast<int>(video_frame_times.size()));
EXPECT_GE(video_frame_times.size(), kMinDataPoints);
std::vector<double> deltas;
for (TimeMap::const_iterator i = audio_frame_times.begin();
i != audio_frame_times.end();
@@ -276,8 +244,7 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
deltas.push_back((i->second - j->second).InMillisecondsF());
}
}
EXPECT_FOR_PERFORMANCE_RUN(min_data_points <=
static_cast<int>(deltas.size()));
EXPECT_GE(deltas.size(), kMinDataPoints);

MeanAndError av_sync(deltas);
av_sync.Print(name, modifier, "av_sync", "ms");
@@ -294,13 +261,11 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// (usually 480 lines or more). Note that this is the video resolution at
// the receiver, but changes originate on the sender side.
std::vector<double> slice_for_analysis;
const int trim_count = is_full_performance_run_ ? kTrimEvents : 0;
if (static_cast<int>(video_frame_lines_.size()) > trim_count * 2) {
slice_for_analysis.reserve(video_frame_lines_.size() - trim_count * 2);
EXPECT_FOR_PERFORMANCE_RUN(
min_data_points <= static_cast<int>(slice_for_analysis.capacity()));
std::transform(video_frame_lines_.begin() + trim_count,
video_frame_lines_.end() - trim_count,
if (video_frame_lines_.size() > kTrimEvents * 2) {
slice_for_analysis.reserve(video_frame_lines_.size() - kTrimEvents * 2);
EXPECT_GE(slice_for_analysis.capacity(), kMinDataPoints);
std::transform(video_frame_lines_.begin() + kTrimEvents,
video_frame_lines_.end() - kTrimEvents,
std::back_inserter(slice_for_analysis),
[](int lines) { return static_cast<double>(lines); });
}
@@ -311,14 +276,14 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// indicates a lack of data.
int last_lines = -1;
int change_count = 0;
for (int i = trim_count;
i < static_cast<int>(video_frame_lines_.size()) - trim_count; ++i) {
for (size_t i = kTrimEvents; i < video_frame_lines_.size() - kTrimEvents;
++i) {
if (video_frame_lines_[i] != last_lines) {
++change_count;
last_lines = video_frame_lines_[i];
}
}
EXPECT_FOR_PERFORMANCE_RUN(change_count > 0);
EXPECT_GT(change_count, 0);
perf_test::PrintResult(name, modifier, "resolution_changes",
base::NumberToString(change_count), "count", true);
}
@@ -335,10 +300,6 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
return;
}

TRACE_EVENT_INSTANT1("cast_perf_test", "AudioFramePlayout",
TRACE_EVENT_SCOPE_THREAD, "playout_time",
(playout_time - base::TimeTicks()).InMicroseconds());

// Note: This is the number of the video frame that this audio belongs to.
uint16_t frame_no;
if (media::cast::DecodeTimestamp(audio_frame->channel(0),
@@ -369,8 +330,6 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
video_frame_lines_.push_back(video_frame->visible_rect().height());
}

const bool is_full_performance_run_;

std::vector<TimeData> audio_events_;
std::vector<TimeData> video_events_;

@@ -464,6 +423,20 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
TabCapturePerformanceTestBase::SetUpCommandLine(command_line);
}

void GetTraceEvents(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name,
trace_analyzer::TraceEventVector* events) {
trace_analyzer::Query query =
trace_analyzer::Query::EventNameIs(event_name) &&
(trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_FLOW_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_INSTANT) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_COMPLETE));
analyzer->FindEvents(query, events);
VLOG(0) << "Retrieved " << events->size() << " events for: " << event_name;
}

// The key contains the name of the argument and the argument.
typedef std::pair<std::string, double> EventMapKey;
typedef std::map<EventMapKey, const trace_analyzer::TraceEvent*> EventMap;
@@ -475,7 +448,7 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
const std::string& event_name,
EventMap* event_map) {
trace_analyzer::TraceEventVector events;
QueryTraceEvents(analyzer, event_name, &events);
GetTraceEvents(analyzer, event_name, &events);
for (size_t i = 0; i < events.size(); i++) {
std::map<std::string, double>::const_iterator j;
for (j = events[i]->arg_numbers.begin();
@@ -528,7 +501,7 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
// from start to finish.
trace_analyzer::TraceEventVector capture_events;
// Sender side:
QueryTraceEvents(analyzer, "Capture", &capture_events);
GetTraceEvents(analyzer, "Capture", &capture_events);
EventMap onbuffer, sink, inserted, encoded, transmitted, decoded, done;
IndexEvents(analyzer, "OnBufferReceived", &onbuffer);
IndexEvents(analyzer, "ConsumeVideoFrame", &sink);
@@ -558,12 +531,9 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
// producing a matrix of when each frame reached each pipeline checkpoint.
// See the "cheat sheet" below for a description of each pipeline
// checkpoint.
const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
EXPECT_FOR_PERFORMANCE_RUN((trim_count * 2) <=
static_cast<int>(capture_events.size()));
ASSERT_GT(capture_events.size(), 2 * kTrimEvents);
std::vector<std::vector<double>> traced_frames;
for (int i = trim_count;
i < static_cast<int>(capture_events.size()) - trim_count; i++) {
for (size_t i = kTrimEvents; i < capture_events.size() - kTrimEvents; i++) {
std::vector<double> times;
const trace_analyzer::TraceEvent* event = capture_events[i];
if (!event->other_event)
@@ -588,15 +558,10 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,

// Report the fraction of captured frames that were dropped somewhere along
// the way (i.e., before playout at the receiver).
const int capture_event_count = capture_events.size() - 2 * trim_count;
EXPECT_FOR_PERFORMANCE_RUN((is_full_performance_run()
? kMinDataPointsForFullRun
: kMinDataPointsForQuickRun) <=
capture_event_count);
const size_t capture_event_count = capture_events.size() - 2 * kTrimEvents;
EXPECT_GE(capture_event_count, kMinDataPoints);
const double success_percent =
(capture_event_count == 0)
? NAN
: (100.0 * traced_frames.size() / capture_event_count);
100.0 * traced_frames.size() / capture_event_count;
perf_test::PrintResult(
test_name, GetSuffixForTestFlags(), "frame_drop_rate",
base::StringPrintf("%f", 100 - success_percent), "percent", true);
@@ -626,12 +591,10 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
MeanAndError AnalyzeTraceDistance(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name) {
trace_analyzer::TraceEventVector events;
QueryTraceEvents(analyzer, event_name, &events);
GetTraceEvents(analyzer, event_name, &events);

const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
std::vector<double> deltas;
for (int i = trim_count + 1;
i < static_cast<int>(events.size()) - trim_count; ++i) {
for (size_t i = kTrimEvents + 1; i < events.size() - kTrimEvents; ++i) {
double delta_micros = events[i]->timestamp - events[i - 1]->timestamp;
deltas.push_back(delta_micros / 1000.0);
}
@@ -674,8 +637,8 @@ IN_PROC_BROWSER_TEST_P(CastV2PerformanceTest, Performance) {
}
scoped_refptr<media::cast::StandaloneCastEnvironment> cast_environment(
new SkewedCastEnvironment(delta));
TestPatternReceiver* const receiver = new TestPatternReceiver(
cast_environment, receiver_end_point, is_full_performance_run());
TestPatternReceiver* const receiver =
new TestPatternReceiver(cast_environment, receiver_end_point);
receiver->Start();

// Create a proxy for the UDP packets that simulates certain network
@@ -730,29 +693,20 @@ IN_PROC_BROWSER_TEST_P(CastV2PerformanceTest, Performance) {
javascript_to_play_video));

// Observe the running browser for a while, collecting a trace.
TraceAnalyzerUniquePtr analyzer = TraceAndObserve(
"gpu.capture,cast_perf_test",
std::vector<base::StringPiece>{
// From the Compositor/Capture pipeline...
"Capture", "OnBufferReceived", "ConsumeVideoFrame",
// From the Cast Sender's pipeline...
"InsertRawVideoFrame", "VideoFrameEncoded",
// From the Cast Receiver's pipeline...
"PullEncodedVideoFrame", "VideoFrameDecoded",
// From the TestPatternReceiver (see class above!)...
"VideoFramePlayout", "AudioFramePlayout"},
// In a full performance run, events will be trimmed from both ends of
// trace. Otherwise, just require the bare-minimum to verify the stats
// calculations will work.
is_full_performance_run() ? (2 * kTrimEvents + kMinDataPointsForFullRun)
: kMinDataPointsForQuickRun);
const std::string json_events = TraceAndObserve("gpu.capture,cast_perf_test");

// Shut down the receiver and all the CastEnvironment threads.
VLOG(1) << "Shutting-down receiver and CastEnvironment...";
receiver->Stop();
cast_environment->Shutdown();

VLOG(2) << "Dump of trace events (trace_events.json.gz.b64):\n"
<< MakeBase64EncodedGZippedString(json_events);

VLOG(1) << "Analyzing trace events...";
std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer;
analyzer.reset(trace_analyzer::TraceAnalyzer::Create(json_events));
analyzer->AssociateAsyncBeginEndEvents();

// This prints out the average time between capture events.
// Depending on the test, the capture frame rate is capped (e.g., at 30fps,