Add encoding time and fps to webrtc.stress case.
This shows the metrics for the first 5 peer connections only; I hope
that's enough, since showing all 30 makes the test really annoying
to triage.

BUG=632299
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.perf:linux_perf_cq;master.tryserver.chromium.perf:mac_retina_perf_cq;master.tryserver.chromium.perf:winx64_10_perf_cq

Review-Url: https://codereview.chromium.org/2561603003
Cr-Commit-Position: refs/heads/master@{#438131}
phoglund authored and Commit bot committed Dec 13, 2016
1 parent 63b2452 commit e12e3f4
Showing 4 changed files with 65 additions and 20 deletions.
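
For orientation before the diffs: a minimal sketch (mine, not part of the commit) of how the reworked measurement is constructed. The webrtc.WebRTC class and the two stat names come from the changes below; the import path assumes the usual tools/perf layout.

    from measurements import webrtc

    # Default: particular_metrics=None keeps every predefined interesting stat.
    page_test = webrtc.WebRTC()

    # webrtc.stress now restricts output to encode time and received frame rate.
    page_test = webrtc.WebRTC(
        particular_metrics=['googAvgEncodeMs', 'googFrameRateReceived'])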
4 changes: 3 additions & 1 deletion tools/perf/benchmarks/webrtc.py
@@ -60,7 +60,9 @@ def Name(cls):
return 'webrtc.stress'

def CreatePageTest(self, options):
return webrtc.WebRTC(use_webrtc_stats=False)
# Exclude all stats except these two.
return webrtc.WebRTC(particular_metrics=['googAvgEncodeMs',
'googFrameRateReceived'])


# WebrtcRendering must be a PerfBenchmark, and not a _Webrtc, because it is a
22 changes: 13 additions & 9 deletions tools/perf/measurements/webrtc.py
@@ -13,29 +13,34 @@
class WebRTC(legacy_page_test.LegacyPageTest):
"""Gathers WebRTC-related metrics on a page set."""

def __init__(self, use_webrtc_stats=True):
def __init__(self, particular_metrics=None):
"""Create the measurement and include selected stats.
Args:
particular_metrics: A list of the stats to include (see webrtc_stats.py
for a list of valid names) or None to select all metrics.
"""
super(WebRTC, self).__init__()
self._cpu_metric = None
self._media_metric = None
self._power_metric = None
self._use_webrtc_stats = use_webrtc_stats
self._particular_metrics = particular_metrics
self._webrtc_stats_metric = None

def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)

def DidStartBrowser(self, browser):
self._cpu_metric = cpu.CpuMetric(browser)
if self._use_webrtc_stats:
self._webrtc_stats_metric = webrtc_stats.WebRtcStatisticsMetric()
self._webrtc_stats_metric = webrtc_stats.WebRtcStatisticsMetric(
self._particular_metrics)

def DidNavigateToPage(self, page, tab):
self._cpu_metric.Start(page, tab)
self._media_metric = media.MediaMetric(tab)
self._media_metric.Start(page, tab)
self._power_metric.Start(page, tab)
if self._use_webrtc_stats:
self._webrtc_stats_metric.Start(page, tab)
self._webrtc_stats_metric.Start(page, tab)

def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--use-fake-device-for-media-stream')
@@ -56,9 +61,8 @@ def ValidateAndMeasurePage(self, page, tab, results):
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)

if self._use_webrtc_stats:
self._webrtc_stats_metric.Stop(page, tab)
self._webrtc_stats_metric.AddResults(tab, results)
self._webrtc_stats_metric.Stop(page, tab)
self._webrtc_stats_metric.AddResults(tab, results)

def DidRunPage(self, platform):
del platform # unused
33 changes: 29 additions & 4 deletions tools/perf/metrics/webrtc_stats.py
@@ -70,6 +70,16 @@
}


def SelectMetrics(particular_metrics):
if not particular_metrics:
return INTERESTING_METRICS

# You can only select among the predefined interesting metrics.
assert set(particular_metrics).issubset(INTERESTING_METRICS.keys())
return {key: value for key, value in INTERESTING_METRICS.iteritems()
if key in particular_metrics}


def GetReportKind(report):
if 'audioInputLevel' in report or 'audioOutputLevel' in report:
return 'audio'
@@ -94,12 +104,12 @@ def StripAudioVideoBweDistinction(stat_name):
return re.sub('^(audio|video|bwe)_', '', stat_name)


def SortStatsIntoTimeSeries(report_batches):
def SortStatsIntoTimeSeries(report_batches, selected_metrics):
time_series = {}
for report_batch in report_batches:
for report in report_batch:
for stat_name, value in report.iteritems():
if stat_name not in INTERESTING_METRICS:
if stat_name not in selected_metrics:
continue
if GetReportKind(report) == 'unknown':
continue
@@ -109,12 +119,21 @@ def SortStatsIntoTimeSeries(report_batches):
return time_series


def PrintSpecialMarkerValue(results):
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, 'peer_connection_5_not_logging_more_conns',
'', [17], description=('This marker signifies we never log more '
'than 5 peer connections'),
important=False))


class WebRtcStatisticsMetric(Metric):
"""Makes it possible to measure stats from peer connections."""

def __init__(self):
def __init__(self, particular_metrics=None):
super(WebRtcStatisticsMetric, self).__init__()
self._all_reports = None
self._selected_metrics = SelectMetrics(particular_metrics)

def Start(self, page, tab):
pass
@@ -130,7 +149,13 @@ def AddResults(self, tab, results):

reports = json.loads(self._all_reports)
for i, report in enumerate(reports):
time_series = SortStatsIntoTimeSeries(report)
time_series = SortStatsIntoTimeSeries(report, self._selected_metrics)

# Only ever show stats for the first 5 peer connections, or it's going
# to look insane in the results.
if i >= 5:
PrintSpecialMarkerValue(results)
return

for stat_name, values in time_series.iteritems():
stat_name_underscored = camel_case.ToUnderscore(stat_name)
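
To make the new selection logic concrete, here is a runnable, self-contained sketch (mine, not from the commit); the real INTERESTING_METRICS table in webrtc_stats.py is much larger, and its values here are illustrative placeholders.

    # Trimmed stand-in for the INTERESTING_METRICS table in webrtc_stats.py.
    INTERESTING_METRICS = {
        'googAvgEncodeMs': {'units': 'ms'},
        'googFrameRateReceived': {'units': 'fps'},
        'googRtt': {'units': 'ms'},
    }

    def SelectMetrics(particular_metrics):
        # None (or an empty list) means "keep everything", as in the code above.
        if not particular_metrics:
            return INTERESTING_METRICS
        # Only predefined interesting metrics may be selected; anything else
        # trips the assert, e.g. SelectMetrics(['notAStat']).
        assert set(particular_metrics).issubset(INTERESTING_METRICS.keys())
        return {key: value for key, value in INTERESTING_METRICS.items()
                if key in particular_metrics}

    print(sorted(SelectMetrics(None)))         # all three names
    print(sorted(SelectMetrics(['googRtt'])))  # ['googRtt']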
26 changes: 20 additions & 6 deletions tools/perf/metrics/webrtc_stats_unittest.py
@@ -107,9 +107,7 @@ def AddValue(self, value):

class WebRtcStatsUnittest(unittest.TestCase):

def _RunMetricOnJson(self, json_to_return):
stats_metric = webrtc_stats.WebRtcStatisticsMetric()

def _RunMetricOnJson(self, json_to_return, stats_metric):
tab = simple_mock.MockObject()
page = simple_mock.MockObject()

@@ -125,7 +123,8 @@ def _RunMetricOnJson(self, json_to_return):
return results

def testExtractsValuesAsTimeSeries(self):
results = self._RunMetricOnJson(SAMPLE_JSON)
stats_metric = webrtc_stats.WebRtcStatisticsMetric()
results = self._RunMetricOnJson(SAMPLE_JSON, stats_metric)

self.assertTrue(results.received_values,
'Expected values for googDecodeMs and others, got none.')
@@ -139,7 +138,8 @@ def testExtractsValuesAsTimeSeries(self):
[100.0, 101.0])

def testExtractsInterestingMetricsOnly(self):
results = self._RunMetricOnJson(SAMPLE_JSON)
stats_metric = webrtc_stats.WebRtcStatisticsMetric()
results = self._RunMetricOnJson(SAMPLE_JSON, stats_metric)

self.assertTrue(len(results.received_values) > 0)
self.assertIn('peer_connection_0', results.received_values[0].name,
@@ -159,6 +159,20 @@ def testExtractsInterestingMetricsOnly(self):
'should not be reported since it is not interesting.')
self.assertNotIn('peer_connection_1_audio_audio_input_level', all_names)

def testExtractsParticularMetricsOnlyIfSpecified(self):
only_goog_rtt_and_max_decode = ['googRtt', 'googMaxDecodeMs']
stats_metric = webrtc_stats.WebRtcStatisticsMetric(
particular_metrics=only_goog_rtt_and_max_decode)
results = self._RunMetricOnJson(SAMPLE_JSON, stats_metric)

received_names = [value.name for value in results.received_values]
expected_names = ['peer_connection_0_audio_goog_rtt',
'peer_connection_0_video_goog_rtt',
'peer_connection_1_video_goog_max_decode_ms',
'peer_connection_1_video_goog_rtt']
self.assertEqual(expected_names, received_names)

def testReturnsIfJsonIsEmpty(self):
results = self._RunMetricOnJson('[]')
stats_metric = webrtc_stats.WebRtcStatisticsMetric()
results = self._RunMetricOnJson('[]', stats_metric)
self.assertFalse(results.received_values)
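
The expected names in the new test follow the flattening scheme used by AddResults: peer_connection_<index>_<kind>_<underscored stat name>. The exact formatting line sits outside this diff's context, so the following is a hedged reconstruction (mine), with a stand-in for telemetry's camel_case.ToUnderscore.

    import re

    def ToUnderscore(name):
        # Stand-in for telemetry's camel_case.ToUnderscore.
        return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

    def ResultName(connection_index, report_kind, stat_name):
        return 'peer_connection_%d_%s_%s' % (
            connection_index, report_kind, ToUnderscore(stat_name))

    # Matches the names asserted in testExtractsParticularMetricsOnlyIfSpecified.
    assert ResultName(0, 'audio', 'googRtt') == 'peer_connection_0_audio_goog_rtt'
    assert ResultName(1, 'video', 'googMaxDecodeMs') == (
        'peer_connection_1_video_goog_max_decode_ms')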
