Diffstat (limited to 'etc')
-rwxr-xr-x  etc/ci/bencher.py | 22
-rw-r--r--  etc/ci/chaos_monkey_test.py | 4
-rw-r--r--  etc/ci/check_dynamic_symbols.py | 65
-rw-r--r--  etc/ci/performance/download_buildbot_timings.py | 143
-rw-r--r--  etc/ci/performance/gecko_driver.py | 29
-rw-r--r--  etc/ci/performance/runner.py | 222
-rw-r--r--  etc/ci/performance/set_s3_policy.py | 11
-rw-r--r--  etc/ci/performance/submit_to_perfherder.py | 300
-rw-r--r--  etc/ci/performance/submit_to_s3.py | 17
-rw-r--r--  etc/ci/performance/test_differ.py | 18
-rw-r--r--  etc/ci/performance/test_runner.py | 496
-rw-r--r--  etc/ci/performance/test_submit_to_perfherder.py | 76
-rwxr-xr-x  etc/ci/report_aggregated_expected_results.py | 107
-rwxr-xr-x  etc/crates-graph.py | 1
-rwxr-xr-x  etc/devtools_parser.py | 27
-rwxr-xr-x  etc/memory_reports_over_time.py | 88
-rwxr-xr-x  etc/patch-trace-template.py | 26
-rw-r--r--  etc/profilicate.py | 118
-rwxr-xr-x  etc/run_in_headless_android_emulator.py | 13
-rw-r--r--  etc/servo_automation_screenshot.py | 59
-rw-r--r--  etc/servo_gdb.py | 24
-rw-r--r--  etc/start_servo.py | 4
-rw-r--r--  etc/wpt-summarize.py | 10
-rw-r--r--  etc/wpt-timing.py | 22
-rw-r--r--  etc/wpt_result_analyzer.py | 18
25 files changed, 942 insertions, 978 deletions
diff --git a/etc/ci/bencher.py b/etc/ci/bencher.py
index f2683b2b227..970d1dfc6da 100755
--- a/etc/ci/bencher.py
+++ b/etc/ci/bencher.py
@@ -17,27 +17,31 @@ import os
def size(args):
size = os.path.getsize(args.binary)
print(size)
- with open(args.bmf_output, 'w', encoding='utf-8') as f:
- json.dump({
- args.variant: {
- 'file-size': {
- 'value': float(size),
+ with open(args.bmf_output, "w", encoding="utf-8") as f:
+ json.dump(
+ {
+ args.variant: {
+ "file-size": {
+ "value": float(size),
+ }
}
- }
- }, f, indent=4)
+ },
+ f,
+ indent=4,
+ )
def merge(args):
output: dict[str, object] = dict()
for input_file in args.inputs:
- with open(input_file, 'r', encoding='utf-8') as f:
+ with open(input_file, "r", encoding="utf-8") as f:
data = json.load(f)
diff = set(data) & set(output)
if diff:
print("Duplicated keys:", diff)
output = data | output
- with open(args.bmf_output, 'w', encoding='utf-8') as f:
+ with open(args.bmf_output, "w", encoding="utf-8") as f:
json.dump(output, f, indent=4)
diff --git a/etc/ci/chaos_monkey_test.py b/etc/ci/chaos_monkey_test.py
index 1a683dc905f..5e94e8b4b15 100644
--- a/etc/ci/chaos_monkey_test.py
+++ b/etc/ci/chaos_monkey_test.py
@@ -24,7 +24,7 @@ TEST_CMD = [
"--log-raw=-",
# We run the content-security-policy test because it creates
# cross-origin iframes, which are good for stress-testing pipelines
- "content-security-policy"
+ "content-security-policy",
]
# Note that there will probably be test failures caused
@@ -35,7 +35,7 @@ test_results = Popen(TEST_CMD, stdout=PIPE)
any_crashes = False
for line in test_results.stdout:
- report = json.loads(line.decode('utf-8'))
+ report = json.loads(line.decode("utf-8"))
if report.get("action") == "process_output":
print("{} - {}".format(report.get("thread"), report.get("data")))
status = report.get("status")
diff --git a/etc/ci/check_dynamic_symbols.py b/etc/ci/check_dynamic_symbols.py
index bc050388c14..dd280b54e0c 100644
--- a/etc/ci/check_dynamic_symbols.py
+++ b/etc/ci/check_dynamic_symbols.py
@@ -12,35 +12,46 @@ import re
import subprocess
import sys
-symbol_regex = re.compile(br"D \*UND\*\t(.*) (.*)$")
-allowed_symbols = frozenset([
- b'unshare',
- b'malloc_usable_size',
- b'__cxa_type_match',
- b'signal',
- b'tcgetattr',
- b'tcsetattr',
- b'__strncpy_chk2',
- b'rand',
- b'__read_chk',
- b'fesetenv',
- b'srand',
- b'abs',
- b'fegetenv',
- b'sigemptyset',
- b'AHardwareBuffer_allocate',
- b'AHardwareBuffer_release',
- b'getentropy',
-])
+symbol_regex = re.compile(rb"D \*UND\*\t(.*) (.*)$")
+allowed_symbols = frozenset(
+ [
+ b"unshare",
+ b"malloc_usable_size",
+ b"__cxa_type_match",
+ b"signal",
+ b"tcgetattr",
+ b"tcsetattr",
+ b"__strncpy_chk2",
+ b"rand",
+ b"__read_chk",
+ b"fesetenv",
+ b"srand",
+ b"abs",
+ b"fegetenv",
+ b"sigemptyset",
+ b"AHardwareBuffer_allocate",
+ b"AHardwareBuffer_release",
+ b"getentropy",
+ ]
+)
actual_symbols = set()
-objdump_output = subprocess.check_output([
- os.path.join(
- 'android-toolchains', 'ndk', 'toolchains', 'arm-linux-androideabi-4.9',
- 'prebuilt', 'linux-x86_64', 'bin', 'arm-linux-androideabi-objdump'),
- '-T',
- 'target/android/armv7-linux-androideabi/debug/libservoshell.so']
-).split(b'\n')
+objdump_output = subprocess.check_output(
+ [
+ os.path.join(
+ "android-toolchains",
+ "ndk",
+ "toolchains",
+ "arm-linux-androideabi-4.9",
+ "prebuilt",
+ "linux-x86_64",
+ "bin",
+ "arm-linux-androideabi-objdump",
+ ),
+ "-T",
+ "target/android/armv7-linux-androideabi/debug/libservoshell.so",
+ ]
+).split(b"\n")
for line in objdump_output:
m = symbol_regex.search(line)
diff --git a/etc/ci/performance/download_buildbot_timings.py b/etc/ci/performance/download_buildbot_timings.py
index 738f23d0187..8bc18813d2e 100644
--- a/etc/ci/performance/download_buildbot_timings.py
+++ b/etc/ci/performance/download_buildbot_timings.py
@@ -16,43 +16,47 @@ SCRIPT_PATH = os.path.split(__file__)[0]
def main():
- default_output_dir = os.path.join(SCRIPT_PATH, 'output')
- default_cache_dir = os.path.join(SCRIPT_PATH, '.cache')
-
- parser = argparse.ArgumentParser(
- description="Download buildbot metadata"
+ default_output_dir = os.path.join(SCRIPT_PATH, "output")
+ default_cache_dir = os.path.join(SCRIPT_PATH, ".cache")
+
+ parser = argparse.ArgumentParser(description="Download buildbot metadata")
+ parser.add_argument(
+ "--index-url",
+ type=str,
+ default="https://build.servo.org/json",
+ help="the URL to get the JSON index data index from. Default: https://build.servo.org/json",
+ )
+ parser.add_argument(
+ "--build-url",
+ type=str,
+ default="https://build.servo.org/json/builders/{}/builds/{}",
+ help="the URL to get the JSON build data from. Default: https://build.servo.org/json/builders/{}/builds/{}",
+ )
+ parser.add_argument(
+ "--cache-dir",
+ type=str,
+ default=default_cache_dir,
+ help="the directory to cache JSON files in. Default: " + default_cache_dir,
+ )
+ parser.add_argument(
+ "--cache-name",
+ type=str,
+ default="build-{}-{}.json",
+ help="the filename to cache JSON data in. Default: build-{}-{}.json",
+ )
+ parser.add_argument(
+ "--output-dir",
+ type=str,
+ default=default_output_dir,
+ help="the directory to save the CSV data to. Default: " + default_output_dir,
)
- parser.add_argument("--index-url",
- type=str,
- default='https://build.servo.org/json',
- help="the URL to get the JSON index data index from. "
- "Default: https://build.servo.org/json")
- parser.add_argument("--build-url",
- type=str,
- default='https://build.servo.org/json/builders/{}/builds/{}',
- help="the URL to get the JSON build data from. "
- "Default: https://build.servo.org/json/builders/{}/builds/{}")
- parser.add_argument("--cache-dir",
- type=str,
- default=default_cache_dir,
- help="the directory to cache JSON files in. Default: " + default_cache_dir)
- parser.add_argument("--cache-name",
- type=str,
- default='build-{}-{}.json',
- help="the filename to cache JSON data in. "
- "Default: build-{}-{}.json")
- parser.add_argument("--output-dir",
- type=str,
- default=default_output_dir,
- help="the directory to save the CSV data to. Default: " + default_output_dir)
- parser.add_argument("--output-name",
- type=str,
- default='builds-{}-{}.csv',
- help="the filename to save the CSV data to. "
- "Default: builds-{}-{}.csv")
- parser.add_argument("--verbose", "-v",
- action='store_true',
- help="print every HTTP request")
+ parser.add_argument(
+ "--output-name",
+ type=str,
+ default="builds-{}-{}.csv",
+ help="the filename to save the CSV data to. Default: builds-{}-{}.csv",
+ )
+ parser.add_argument("--verbose", "-v", action="store_true", help="print every HTTP request")
args = parser.parse_args()
os.makedirs(args.cache_dir, exist_ok=True)
@@ -63,7 +67,7 @@ def main():
if args.verbose:
print("Downloading index {}.".format(args.index_url))
with urlopen(args.index_url) as response:
- index = json.loads(response.read().decode('utf-8'))
+ index = json.loads(response.read().decode("utf-8"))
builds = []
@@ -75,12 +79,11 @@ def main():
if args.verbose:
print("Downloading recent build {}.".format(recent_build_url))
with urlopen(recent_build_url) as response:
- recent_build = json.loads(response.read().decode('utf-8'))
+ recent_build = json.loads(response.read().decode("utf-8"))
recent_build_number = recent_build["number"]
# Download each build, and convert to CSV
for build_number in range(0, recent_build_number):
-
# Rather annoyingly, we can't just use the Python http cache,
# because it doesn't cache 404 responses. So we roll our own.
cache_json_name = args.cache_name.format(builder, build_number)
@@ -96,7 +99,7 @@ def main():
print("Downloading build {}.".format(build_url))
try:
with urlopen(build_url) as response:
- build = json.loads(response.read().decode('utf-8'))
+ build = json.loads(response.read().decode("utf-8"))
except HTTPError as e:
if e.code == 404:
build = {}
@@ -104,46 +107,46 @@ def main():
raise
# Don't cache current builds.
- if build.get('currentStep'):
+ if build.get("currentStep"):
continue
- with open(cache_json, 'w+') as f:
+ with open(cache_json, "w+") as f:
json.dump(build, f)
- if 'times' in build:
+ if "times" in build:
builds.append(build)
years = {}
for build in builds:
- build_date = date.fromtimestamp(build['times'][0])
+ build_date = date.fromtimestamp(build["times"][0])
years.setdefault(build_date.year, {}).setdefault(build_date.month, []).append(build)
for year, months in years.items():
for month, builds in months.items():
-
output_name = args.output_name.format(year, month)
output = os.path.join(args.output_dir, output_name)
# Create the CSV file.
if args.verbose:
- print('Creating file {}.'.format(output))
- with open(output, 'w+') as output_file:
+ print("Creating file {}.".format(output))
+ with open(output, "w+") as output_file:
output_csv = csv.writer(output_file)
# The CSV column names
- output_csv.writerow([
- 'builder',
- 'buildNumber',
- 'buildTimestamp',
- 'stepName',
- 'stepText',
- 'stepNumber',
- 'stepStart',
- 'stepFinish'
- ])
+ output_csv.writerow(
+ [
+ "builder",
+ "buildNumber",
+ "buildTimestamp",
+ "stepName",
+ "stepText",
+ "stepNumber",
+ "stepStart",
+ "stepFinish",
+ ]
+ )
for build in builds:
-
builder = build["builderName"]
build_number = build["number"]
build_timestamp = datetime.fromtimestamp(build["times"][0]).replace(microsecond=0)
@@ -152,20 +155,22 @@ def main():
for step in build["steps"]:
if step["isFinished"]:
step_name = step["name"]
- step_text = ' '.join(step["text"])
+ step_text = " ".join(step["text"])
step_number = step["step_number"]
step_start = floor(step["times"][0])
step_finish = floor(step["times"][1])
- output_csv.writerow([
- builder,
- build_number,
- build_timestamp,
- step_name,
- step_text,
- step_number,
- step_start,
- step_finish
- ])
+ output_csv.writerow(
+ [
+ builder,
+ build_number,
+ build_timestamp,
+ step_name,
+ step_text,
+ step_number,
+ step_start,
+ step_finish,
+ ]
+ )
if __name__ == "__main__":
diff --git a/etc/ci/performance/gecko_driver.py b/etc/ci/performance/gecko_driver.py
index b10c5432f1d..789b3223403 100644
--- a/etc/ci/performance/gecko_driver.py
+++ b/etc/ci/performance/gecko_driver.py
@@ -15,7 +15,7 @@ import sys
@contextmanager
def create_gecko_session():
try:
- firefox_binary = os.environ['FIREFOX_BIN']
+ firefox_binary = os.environ["FIREFOX_BIN"]
except KeyError:
print("+=============================================================+")
print("| You must set the path to your firefox binary to FIREFOX_BIN |")
@@ -36,10 +36,7 @@ def generate_placeholder(testcase):
# use a placeholder with values = -1 to make Treeherder happy, and still be
# able to identify failed tests (successful tests have time >=0).
- timings = {
- "testcase": testcase,
- "title": ""
- }
+ timings = {"testcase": testcase, "title": ""}
timing_names = [
"navigationStart",
@@ -81,16 +78,9 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
return generate_placeholder(testcase)
try:
- timings = {
- "testcase": testcase,
- "title": driver.title.replace(",", ",")
- }
-
- timings.update(json.loads(
- driver.execute_script(
- "return JSON.stringify(performance.timing)"
- )
- ))
+ timings = {"testcase": testcase, "title": driver.title.replace(",", ",")}
+
+ timings.update(json.loads(driver.execute_script("return JSON.stringify(performance.timing)")))
except Exception:
# We need to return a timing object no matter what happened.
# See the comment in generate_placeholder() for explanation
@@ -101,17 +91,14 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
# TODO: the timeout is hardcoded
driver.implicitly_wait(5) # sec
driver.find_element_by_id("GECKO_TEST_DONE")
- timings.update(json.loads(
- driver.execute_script(
- "return JSON.stringify(window.customTimers)"
- )
- ))
+ timings.update(json.loads(driver.execute_script("return JSON.stringify(window.customTimers)")))
return [timings]
-if __name__ == '__main__':
+if __name__ == "__main__":
# Just for manual testing
from pprint import pprint
+
url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
pprint(run_gecko_test(url, 15))
diff --git a/etc/ci/performance/runner.py b/etc/ci/performance/runner.py
index b5cdddedb30..36dc96b1094 100644
--- a/etc/ci/performance/runner.py
+++ b/etc/ci/performance/runner.py
@@ -23,14 +23,13 @@ SYSTEM = platform.system()
def load_manifest(filename):
- with open(filename, 'r') as f:
+ with open(filename, "r") as f:
text = f.read()
return list(parse_manifest(text))
def parse_manifest(text):
- lines = filter(lambda x: x != "" and not x.startswith("#"),
- map(lambda x: x.strip(), text.splitlines()))
+ lines = filter(lambda x: x != "" and not x.startswith("#"), map(lambda x: x.strip(), text.splitlines()))
output = []
for line in lines:
if line.split(" ")[0] == "async":
@@ -46,21 +45,18 @@ def testcase_url(base, testcase):
# the server on port 80. To allow non-root users to run the test
# case, we take the URL to be relative to a base URL.
(scheme, netloc, path, query, fragment) = urlsplit(testcase)
- relative_url = urlunsplit(('', '', '.' + path, query, fragment))
+ relative_url = urlunsplit(("", "", "." + path, query, fragment))
absolute_url = urljoin(base, relative_url)
return absolute_url
def execute_test(url, command, timeout):
try:
- return subprocess.check_output(
- command, stderr=subprocess.STDOUT, timeout=timeout
- )
+ return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
- print("You may want to re-run the test manually:\n{}"
- .format(' '.join(command)))
+ print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
except subprocess.TimeoutExpired:
print("Test FAILED due to timeout: {}".format(url))
return ""
@@ -74,22 +70,21 @@ def run_servo_test(testcase, url, date, timeout, is_async):
ua_script_path = "{}/user-agent-js".format(os.getcwd())
command = [
- "../../../target/release/servo", url,
+ "../../../target/release/servo",
+ url,
"--userscripts=" + ua_script_path,
"--headless",
- "-x", "-o", "output.png"
+ "-x",
+ "-o",
+ "output.png",
]
log = ""
try:
- log = subprocess.check_output(
- command, stderr=subprocess.STDOUT, timeout=timeout
- )
+ log = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
- print("You may want to re-run the test manually:\n{}".format(
- ' '.join(command)
- ))
+ print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
except subprocess.TimeoutExpired:
print("Test FAILED due to timeout: {}".format(testcase))
return parse_log(log, testcase, url, date)
@@ -100,7 +95,7 @@ def parse_log(log, testcase, url, date):
block = []
copy = False
for line_bytes in log.splitlines():
- line = line_bytes.decode('utf-8')
+ line = line_bytes.decode("utf-8")
if line.strip() == ("[PERF] perf block start"):
copy = True
@@ -119,10 +114,10 @@ def parse_log(log, testcase, url, date):
except ValueError:
print("[DEBUG] failed to parse the following line:")
print(line)
- print('[DEBUG] log:')
- print('-----')
+ print("[DEBUG] log:")
+ print("-----")
print(log)
- print('-----')
+ print("-----")
return None
if key == "testcase" or key == "title":
@@ -133,10 +128,12 @@ def parse_log(log, testcase, url, date):
return timing
def valid_timing(timing, url=None):
- if (timing is None
- or testcase is None
- or timing.get('title') == 'Error loading page'
- or timing.get('testcase') != url):
+ if (
+ timing is None
+ or testcase is None
+ or timing.get("title") == "Error loading page"
+ or timing.get("testcase") != url
+ ):
return False
else:
return True
@@ -178,10 +175,10 @@ def parse_log(log, testcase, url, date):
# Set the testcase field to contain the original testcase name,
# rather than the url.
def set_testcase(timing, testcase=None, date=None):
- timing['testcase'] = testcase
- timing['system'] = SYSTEM
- timing['machine'] = MACHINE
- timing['date'] = date
+ timing["testcase"] = testcase
+ timing["system"] = SYSTEM
+ timing["machine"] = MACHINE
+ timing["date"] = date
return timing
valid_timing_for_case = partial(valid_timing, url=url)
@@ -190,10 +187,10 @@ def parse_log(log, testcase, url, date):
if len(timings) == 0:
print("Didn't find any perf data in the log, test timeout?")
- print('[DEBUG] log:')
- print('-----')
+ print("[DEBUG] log:")
+ print("-----")
print(log)
- print('-----')
+ print("-----")
return [create_placeholder(testcase)]
else:
@@ -204,22 +201,25 @@ def filter_result_by_manifest(result_json, manifest, base):
filtered = []
for name, is_async in manifest:
url = testcase_url(base, name)
- match = [tc for tc in result_json if tc['testcase'] == url]
+ match = [tc for tc in result_json if tc["testcase"] == url]
if len(match) == 0:
- raise Exception(("Missing test result: {}. This will cause a "
- "discontinuity in the treeherder graph, "
- "so we won't submit this data.").format(name))
+ raise Exception(
+ (
+ "Missing test result: {}. This will cause a "
+ "discontinuity in the treeherder graph, "
+ "so we won't submit this data."
+ ).format(name)
+ )
filtered += match
return filtered
def take_result_median(result_json, expected_runs):
median_results = []
- for k, g in itertools.groupby(result_json, lambda x: x['testcase']):
+ for k, g in itertools.groupby(result_json, lambda x: x["testcase"]):
group = list(g)
if len(group) != expected_runs:
- print(("Warning: Not enough test data for {},"
- " maybe some runs failed?").format(k))
+ print(("Warning: Not enough test data for {}, maybe some runs failed?").format(k))
median_result = {}
for k, _ in group[0].items():
@@ -227,8 +227,7 @@ def take_result_median(result_json, expected_runs):
median_result[k] = group[0][k]
else:
try:
- median_result[k] = median([x[k] for x in group
- if x[k] is not None])
+ median_result[k] = median([x[k] for x in group if x[k] is not None])
except StatisticsError:
median_result[k] = -1
median_results.append(median_result)
@@ -236,72 +235,65 @@ def take_result_median(result_json, expected_runs):
def save_result_json(results, filename, manifest, expected_runs, base):
-
results = filter_result_by_manifest(results, manifest, base)
results = take_result_median(results, expected_runs)
if len(results) == 0:
- with open(filename, 'w') as f:
- json.dump("No test result found in the log. All tests timeout?",
- f, indent=2)
+ with open(filename, "w") as f:
+ json.dump("No test result found in the log. All tests timeout?", f, indent=2)
else:
- with open(filename, 'w') as f:
+ with open(filename, "w") as f:
json.dump(results, f, indent=2)
print("Result saved to {}".format(filename))
def save_result_csv(results, filename, manifest, expected_runs, base):
-
fieldnames = [
- 'system',
- 'machine',
- 'date',
- 'testcase',
- 'title',
- 'connectEnd',
- 'connectStart',
- 'domComplete',
- 'domContentLoadedEventEnd',
- 'domContentLoadedEventStart',
- 'domInteractive',
- 'domLoading',
- 'domainLookupEnd',
- 'domainLookupStart',
- 'fetchStart',
- 'loadEventEnd',
- 'loadEventStart',
- 'navigationStart',
- 'redirectEnd',
- 'redirectStart',
- 'requestStart',
- 'responseEnd',
- 'responseStart',
- 'secureConnectionStart',
- 'unloadEventEnd',
- 'unloadEventStart',
+ "system",
+ "machine",
+ "date",
+ "testcase",
+ "title",
+ "connectEnd",
+ "connectStart",
+ "domComplete",
+ "domContentLoadedEventEnd",
+ "domContentLoadedEventStart",
+ "domInteractive",
+ "domLoading",
+ "domainLookupEnd",
+ "domainLookupStart",
+ "fetchStart",
+ "loadEventEnd",
+ "loadEventStart",
+ "navigationStart",
+ "redirectEnd",
+ "redirectStart",
+ "requestStart",
+ "responseEnd",
+ "responseStart",
+ "secureConnectionStart",
+ "unloadEventEnd",
+ "unloadEventStart",
]
- successes = [r for r in results if r['domComplete'] != -1]
+ successes = [r for r in results if r["domComplete"] != -1]
- with open(filename, 'w', encoding='utf-8') as csvfile:
+ with open(filename, "w", encoding="utf-8") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames)
writer.writeheader()
writer.writerows(successes)
def format_result_summary(results):
- failures = list(filter(lambda x: x['domComplete'] == -1, results))
+ failures = list(filter(lambda x: x["domComplete"] == -1, results))
result_log = """
========================================
Total {total} tests; {suc} succeeded, {fail} failed.
Failure summary:
-""".format(
- total=len(results),
- suc=len(list(filter(lambda x: x['domComplete'] != -1, results))),
- fail=len(failures)
- )
- uniq_failures = list(set(map(lambda x: x['testcase'], failures)))
+""".format(total=len(results), suc=len(list(filter(lambda x: x["domComplete"] != -1, results))), fail=len(failures))
+ uniq_failures = list(set(map(lambda x: x["testcase"], failures)))
for failure in uniq_failures:
result_log += " - {}\n".format(failure)
@@ -311,40 +303,40 @@ Failure summary:
def main():
- parser = argparse.ArgumentParser(
- description="Run page load test on servo"
+ parser = argparse.ArgumentParser(description="Run page load test on servo")
+ parser.add_argument("tp5_manifest", help="the test manifest in tp5 format")
+ parser.add_argument("output_file", help="filename for the output json")
+ parser.add_argument(
+ "--base",
+ type=str,
+ default="http://localhost:8000/",
+ help="the base URL for tests. Default: http://localhost:8000/",
+ )
+ parser.add_argument("--runs", type=int, default=20, help="number of runs for each test case. Defult: 20")
+ parser.add_argument(
+ "--timeout",
+ type=int,
+ default=300, # 5 min
+ help=("kill the test if not finished in time (sec). Default: 5 min"),
+ )
+ parser.add_argument(
+ "--date",
+ type=str,
+ default=None, # 5 min
+ help=("the date to use in the CSV file."),
+ )
+ parser.add_argument(
+ "--engine",
+ type=str,
+ default="servo",
+ help=("The engine to run the tests on. Currently only servo and gecko are supported."),
)
- parser.add_argument("tp5_manifest",
- help="the test manifest in tp5 format")
- parser.add_argument("output_file",
- help="filename for the output json")
- parser.add_argument("--base",
- type=str,
- default='http://localhost:8000/',
- help="the base URL for tests. Default: http://localhost:8000/")
- parser.add_argument("--runs",
- type=int,
- default=20,
- help="number of runs for each test case. Defult: 20")
- parser.add_argument("--timeout",
- type=int,
- default=300, # 5 min
- help=("kill the test if not finished in time (sec)."
- " Default: 5 min"))
- parser.add_argument("--date",
- type=str,
- default=None, # 5 min
- help=("the date to use in the CSV file."))
- parser.add_argument("--engine",
- type=str,
- default='servo',
- help=("The engine to run the tests on. Currently only"
- " servo and gecko are supported."))
args = parser.parse_args()
- if args.engine == 'servo':
+ if args.engine == "servo":
run_test = run_servo_test
- elif args.engine == 'gecko':
+ elif args.engine == "gecko":
import gecko_driver # Load this only when we need gecko test
+
run_test = gecko_driver.run_gecko_test
date = args.date or DATE
try:
@@ -354,9 +346,7 @@ def main():
for testcase, is_async in testcases:
url = testcase_url(args.base, testcase)
for run in range(args.runs):
- print("Running test {}/{} on {}".format(run + 1,
- args.runs,
- url))
+ print("Running test {}/{} on {}".format(run + 1, args.runs, url))
# results will be a mixure of timings dict and testcase strings
# testcase string indicates a failed test
results += run_test(testcase, url, date, args.timeout, is_async)
@@ -364,7 +354,7 @@ def main():
# TODO: Record and analyze other performance.timing properties
print(format_result_summary(results))
- if args.output_file.endswith('.csv'):
+ if args.output_file.endswith(".csv"):
save_result_csv(results, args.output_file, testcases, args.runs, args.base)
else:
save_result_json(results, args.output_file, testcases, args.runs, args.base)
diff --git a/etc/ci/performance/set_s3_policy.py b/etc/ci/performance/set_s3_policy.py
index fc9572471d5..504022bd6e4 100644
--- a/etc/ci/performance/set_s3_policy.py
+++ b/etc/ci/performance/set_s3_policy.py
@@ -10,13 +10,14 @@ import boto3
def main():
parser = argparse.ArgumentParser(
- description=("Set the policy of the servo-perf bucket. "
- "Remember to set your S3 credentials "
- "https://github.com/boto/boto3"))
+ description=(
+ "Set the policy of the servo-perf bucket. Remember to set your S3 credentials https://github.com/boto/boto3"
+ )
+ )
parser.parse_args()
- s3 = boto3.resource('s3')
- BUCKET = 'servo-perf'
+ s3 = boto3.resource("s3")
+ BUCKET = "servo-perf"
POLICY = """{
"Version":"2012-10-17",
"Statement":[
diff --git a/etc/ci/performance/submit_to_perfherder.py b/etc/ci/performance/submit_to_perfherder.py
index fe1ee57f86b..895a972d0fb 100644
--- a/etc/ci/performance/submit_to_perfherder.py
+++ b/etc/ci/performance/submit_to_perfherder.py
@@ -11,8 +11,7 @@ import operator
import os
import random
import string
-from thclient import (TreeherderClient, TreeherderResultSetCollection,
- TreeherderJobCollection)
+from thclient import TreeherderClient, TreeherderResultSetCollection, TreeherderJobCollection
import time
from runner import format_result_summary
@@ -24,33 +23,28 @@ def geometric_mean(iterable):
def format_testcase_name(name):
- temp = name.replace('http://localhost:8000/page_load_test/', '')
- temp = temp.replace('http://localhost:8000/tp6/', '')
- temp = temp.split('/')[0]
+ temp = name.replace("http://localhost:8000/page_load_test/", "")
+ temp = temp.replace("http://localhost:8000/tp6/", "")
+ temp = temp.split("/")[0]
temp = temp[0:80]
return temp
-def format_perf_data(perf_json, engine='servo'):
+def format_perf_data(perf_json, engine="servo"):
suites = []
measurement = "domComplete" # Change this to an array when we have more
def get_time_from_nav_start(timings, measurement):
- return timings[measurement] - timings['navigationStart']
+ return timings[measurement] - timings["navigationStart"]
- measurementFromNavStart = partial(get_time_from_nav_start,
- measurement=measurement)
+ measurementFromNavStart = partial(get_time_from_nav_start, measurement=measurement)
- if (engine == 'gecko'):
- name = 'gecko.{}'.format(measurement)
+ if engine == "gecko":
+ name = "gecko.{}".format(measurement)
else:
name = measurement
- suite = {
- "name": name,
- "value": geometric_mean(map(measurementFromNavStart, perf_json)),
- "subtests": []
- }
+ suite = {"name": name, "value": geometric_mean(map(measurementFromNavStart, perf_json)), "subtests": []}
for testcase in perf_json:
if measurementFromNavStart(testcase) < 0:
value = -1
@@ -58,10 +52,7 @@ def format_perf_data(perf_json, engine='servo'):
else:
value = measurementFromNavStart(testcase)
- suite["subtests"].append({
- "name": format_testcase_name(testcase["testcase"]),
- "value": value
- })
+ suite["subtests"].append({"name": format_testcase_name(testcase["testcase"]), "value": value})
suites.append(suite)
@@ -69,7 +60,7 @@ def format_perf_data(perf_json, engine='servo'):
"performance_data": {
# https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
"framework": {"name": "servo-perf"},
- "suites": suites
+ "suites": suites,
}
}
@@ -82,20 +73,20 @@ def create_resultset_collection(dataset):
for data in dataset:
trs = trsc.get_resultset()
- trs.add_push_timestamp(data['push_timestamp'])
- trs.add_revision(data['revision'])
- trs.add_author(data['author'])
+ trs.add_push_timestamp(data["push_timestamp"])
+ trs.add_revision(data["revision"])
+ trs.add_author(data["author"])
# TODO: figure out where type is used
# trs.add_type(data['type'])
revisions = []
- for rev in data['revisions']:
+ for rev in data["revisions"]:
tr = trs.get_revision()
- tr.add_revision(rev['revision'])
- tr.add_author(rev['author'])
- tr.add_comment(rev['comment'])
- tr.add_repository(rev['repository'])
+ tr.add_revision(rev["revision"])
+ tr.add_author(rev["author"])
+ tr.add_comment(rev["comment"])
+ tr.add_repository(rev["repository"])
revisions.append(tr)
trs.add_revisions(revisions)
@@ -114,46 +105,42 @@ def create_job_collection(dataset):
for data in dataset:
tj = tjc.get_job()
- tj.add_revision(data['revision'])
- tj.add_project(data['project'])
- tj.add_coalesced_guid(data['job']['coalesced'])
- tj.add_job_guid(data['job']['job_guid'])
- tj.add_job_name(data['job']['name'])
- tj.add_job_symbol(data['job']['job_symbol'])
- tj.add_group_name(data['job']['group_name'])
- tj.add_group_symbol(data['job']['group_symbol'])
- tj.add_description(data['job']['desc'])
- tj.add_product_name(data['job']['product_name'])
- tj.add_state(data['job']['state'])
- tj.add_result(data['job']['result'])
- tj.add_reason(data['job']['reason'])
- tj.add_who(data['job']['who'])
- tj.add_tier(data['job']['tier'])
- tj.add_submit_timestamp(data['job']['submit_timestamp'])
- tj.add_start_timestamp(data['job']['start_timestamp'])
- tj.add_end_timestamp(data['job']['end_timestamp'])
- tj.add_machine(data['job']['machine'])
+ tj.add_revision(data["revision"])
+ tj.add_project(data["project"])
+ tj.add_coalesced_guid(data["job"]["coalesced"])
+ tj.add_job_guid(data["job"]["job_guid"])
+ tj.add_job_name(data["job"]["name"])
+ tj.add_job_symbol(data["job"]["job_symbol"])
+ tj.add_group_name(data["job"]["group_name"])
+ tj.add_group_symbol(data["job"]["group_symbol"])
+ tj.add_description(data["job"]["desc"])
+ tj.add_product_name(data["job"]["product_name"])
+ tj.add_state(data["job"]["state"])
+ tj.add_result(data["job"]["result"])
+ tj.add_reason(data["job"]["reason"])
+ tj.add_who(data["job"]["who"])
+ tj.add_tier(data["job"]["tier"])
+ tj.add_submit_timestamp(data["job"]["submit_timestamp"])
+ tj.add_start_timestamp(data["job"]["start_timestamp"])
+ tj.add_end_timestamp(data["job"]["end_timestamp"])
+ tj.add_machine(data["job"]["machine"])
tj.add_build_info(
- data['job']['build_platform']['os_name'],
- data['job']['build_platform']['platform'],
- data['job']['build_platform']['architecture']
+ data["job"]["build_platform"]["os_name"],
+ data["job"]["build_platform"]["platform"],
+ data["job"]["build_platform"]["architecture"],
)
tj.add_machine_info(
- data['job']['machine_platform']['os_name'],
- data['job']['machine_platform']['platform'],
- data['job']['machine_platform']['architecture']
+ data["job"]["machine_platform"]["os_name"],
+ data["job"]["machine_platform"]["platform"],
+ data["job"]["machine_platform"]["architecture"],
)
- tj.add_option_collection(data['job']['option_collection'])
+ tj.add_option_collection(data["job"]["option_collection"])
- for artifact_data in data['job']['artifacts']:
- tj.add_artifact(
- artifact_data['name'],
- artifact_data['type'],
- artifact_data['blob']
- )
+ for artifact_data in data["job"]["artifacts"]:
+ tj.add_artifact(artifact_data["name"], artifact_data["type"], artifact_data["blob"])
tjc.add(tj)
return tjc
@@ -161,30 +148,28 @@ def create_job_collection(dataset):
# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
-
print("[DEBUG] failures:")
- print(list(map(lambda x: x['testcase'], failures)))
+ print(list(map(lambda x: x["testcase"], failures)))
- author = "{} <{}>".format(revision['author']['name'],
- revision['author']['email'])
+ author = "{} <{}>".format(revision["author"]["name"], revision["author"]["email"])
dataset = [
{
# The top-most revision in the list of commits for a push.
- 'revision': revision['commit'],
- 'author': author,
- 'push_timestamp': int(revision['author']['timestamp']),
- 'type': 'push',
+ "revision": revision["commit"],
+ "author": author,
+ "push_timestamp": int(revision["author"]["timestamp"]),
+ "type": "push",
# a list of revisions associated with the resultset. There should
# be at least one.
- 'revisions': [
+ "revisions": [
{
- 'comment': revision['subject'],
- 'revision': revision['commit'],
- 'repository': 'servo',
- 'author': author
+ "comment": revision["subject"],
+ "revision": revision["commit"],
+ "repository": "servo",
+ "author": author,
}
- ]
+ ],
}
]
@@ -195,158 +180,129 @@ def submit(perf_data, failures, revision, summary, engine):
# if len(failures) > 0:
# result = "testfailed"
- hashlen = len(revision['commit'])
- job_guid = ''.join(
- random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
- )
+ hashlen = len(revision["commit"])
+ job_guid = "".join(random.choice(string.ascii_letters + string.digits) for i in range(hashlen))
- if (engine == "gecko"):
+ if engine == "gecko":
project = "servo"
- job_symbol = 'PLG'
- group_symbol = 'SPG'
- group_name = 'Servo Perf on Gecko'
+ job_symbol = "PLG"
+ group_symbol = "SPG"
+ group_name = "Servo Perf on Gecko"
else:
project = "servo"
- job_symbol = 'PL'
- group_symbol = 'SP'
- group_name = 'Servo Perf'
+ job_symbol = "PL"
+ group_symbol = "SP"
+ group_name = "Servo Perf"
dataset = [
{
- 'project': project,
- 'revision': revision['commit'],
- 'job': {
- 'job_guid': job_guid,
- 'product_name': project,
- 'reason': 'scheduler',
+ "project": project,
+ "revision": revision["commit"],
+ "job": {
+ "job_guid": job_guid,
+ "product_name": project,
+ "reason": "scheduler",
# TODO: What is `who` for?
- 'who': 'Servo',
- 'desc': 'Servo Page Load Time Tests',
- 'name': 'Servo Page Load Time',
+ "who": "Servo",
+ "desc": "Servo Page Load Time Tests",
+ "name": "Servo Page Load Time",
# The symbol representing the job displayed in
# treeherder.allizom.org
- 'job_symbol': job_symbol,
-
+ "job_symbol": job_symbol,
# The symbol representing the job group in
# treeherder.allizom.org
- 'group_symbol': group_symbol,
- 'group_name': group_name,
-
+ "group_symbol": group_symbol,
+ "group_name": group_name,
# TODO: get the real timing from the test runner
- 'submit_timestamp': str(int(time.time())),
- 'start_timestamp': str(int(time.time())),
- 'end_timestamp': str(int(time.time())),
-
- 'state': 'completed',
- 'result': result, # "success" or "testfailed"
-
- 'machine': 'local-machine',
+ "submit_timestamp": str(int(time.time())),
+ "start_timestamp": str(int(time.time())),
+ "end_timestamp": str(int(time.time())),
+ "state": "completed",
+ "result": result, # "success" or "testfailed"
+ "machine": "local-machine",
# TODO: read platform from test result
- 'build_platform': {
- 'platform': 'linux64',
- 'os_name': 'linux',
- 'architecture': 'x86_64'
- },
- 'machine_platform': {
- 'platform': 'linux64',
- 'os_name': 'linux',
- 'architecture': 'x86_64'
- },
-
- 'option_collection': {'opt': True},
-
+ "build_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
+ "machine_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
+ "option_collection": {"opt": True},
# jobs can belong to different tiers
# setting the tier here will determine which tier the job
# belongs to. However, if a job is set as Tier of 1, but
# belongs to the Tier 2 profile on the server, it will still
# be saved as Tier 2.
- 'tier': 1,
-
+ "tier": 1,
# the ``name`` of the log can be the default of "buildbot_text"
# however, you can use a custom name. See below.
# TODO: point this to the log when we have them uploaded to S3
- 'log_references': [
- {
- 'url': 'TBD',
- 'name': 'test log'
- }
- ],
+ "log_references": [{"url": "TBD", "name": "test log"}],
# The artifact can contain any kind of structured data
# associated with a test.
- 'artifacts': [
+ "artifacts": [
{
- 'type': 'json',
- 'name': 'performance_data',
+ "type": "json",
+ "name": "performance_data",
# TODO: include the job_guid when the runner actually
# generates one
# 'job_guid': job_guid,
- 'blob': perf_data
+ "blob": perf_data,
},
{
- 'type': 'json',
- 'name': 'Job Info',
+ "type": "json",
+ "name": "Job Info",
# 'job_guid': job_guid,
"blob": {
- "job_details": [
- {
- "content_type": "raw_html",
- "title": "Result Summary",
- "value": summary
- }
- ]
- }
- }
+ "job_details": [{"content_type": "raw_html", "title": "Result Summary", "value": summary}]
+ },
+ },
],
# List of job guids that were coalesced to this job
- 'coalesced': []
- }
+ "coalesced": [],
+ },
}
]
tjc = create_job_collection(dataset)
# TODO: extract this read credential code out of this function.
- cred = {
- 'client_id': os.environ['TREEHERDER_CLIENT_ID'],
- 'secret': os.environ['TREEHERDER_CLIENT_SECRET']
- }
+ cred = {"client_id": os.environ["TREEHERDER_CLIENT_ID"], "secret": os.environ["TREEHERDER_CLIENT_SECRET"]}
- client = TreeherderClient(server_url='https://treeherder.mozilla.org',
- client_id=cred['client_id'],
- secret=cred['secret'])
+ client = TreeherderClient(
+ server_url="https://treeherder.mozilla.org", client_id=cred["client_id"], secret=cred["secret"]
+ )
# data structure validation is automatically performed here, if validation
# fails a TreeherderClientError is raised
- client.post_collection('servo', trsc)
- client.post_collection('servo', tjc)
+ client.post_collection("servo", trsc)
+ client.post_collection("servo", tjc)
def main():
parser = argparse.ArgumentParser(
- description=("Submit Servo performance data to Perfherder. "
- "Remember to set your Treeherder credential as environment"
- " variable \'TREEHERDER_CLIENT_ID\' and "
- "\'TREEHERDER_CLIENT_SECRET\'"))
- parser.add_argument("perf_json",
- help="the output json from runner")
- parser.add_argument("revision_json",
- help="the json containing the servo revision data")
- parser.add_argument("--engine",
- type=str,
- default='servo',
- help=("The engine to run the tests on. Currently only"
- " servo and gecko are supported."))
+ description=(
+ "Submit Servo performance data to Perfherder. "
+ "Remember to set your Treeherder credential as environment"
+ " variable 'TREEHERDER_CLIENT_ID' and "
+ "'TREEHERDER_CLIENT_SECRET'"
+ )
+ )
+ parser.add_argument("perf_json", help="the output json from runner")
+ parser.add_argument("revision_json", help="the json containing the servo revision data")
+ parser.add_argument(
+ "--engine",
+ type=str,
+ default="servo",
+ help=("The engine to run the tests on. Currently only servo and gecko are supported."),
+ )
args = parser.parse_args()
- with open(args.perf_json, 'r') as f:
+ with open(args.perf_json, "r") as f:
result_json = json.load(f)
- with open(args.revision_json, 'r') as f:
+ with open(args.revision_json, "r") as f:
revision = json.load(f)
perf_data = format_perf_data(result_json, args.engine)
- failures = list(filter(lambda x: x['domComplete'] == -1, result_json))
- summary = format_result_summary(result_json).replace('\n', '<br/>')
+ failures = list(filter(lambda x: x["domComplete"] == -1, result_json))
+ summary = format_result_summary(result_json).replace("\n", "<br/>")
submit(perf_data, failures, revision, summary, args.engine)
print("Done!")
diff --git a/etc/ci/performance/submit_to_s3.py b/etc/ci/performance/submit_to_s3.py
index e390464f182..9747ad8c211 100644
--- a/etc/ci/performance/submit_to_s3.py
+++ b/etc/ci/performance/submit_to_s3.py
@@ -10,17 +10,16 @@ import boto3
def main():
parser = argparse.ArgumentParser(
- description=("Submit Servo performance data to S3. "
- "Remember to set your S3 credentials "
- "https://github.com/boto/boto3"))
- parser.add_argument("perf_file",
- help="the output CSV file from runner")
- parser.add_argument("perf_key",
- help="the S3 key to upload to")
+ description=(
+ "Submit Servo performance data to S3. Remember to set your S3 credentials https://github.com/boto/boto3"
+ )
+ )
+ parser.add_argument("perf_file", help="the output CSV file from runner")
+ parser.add_argument("perf_key", help="the S3 key to upload to")
args = parser.parse_args()
- s3 = boto3.client('s3')
- BUCKET = 'servo-perf'
+ s3 = boto3.client("s3")
+ BUCKET = "servo-perf"
s3.upload_file(args.perf_file, BUCKET, args.perf_key)
print("Done!")
diff --git a/etc/ci/performance/test_differ.py b/etc/ci/performance/test_differ.py
index 31bf322b34b..c39fd34755b 100644
--- a/etc/ci/performance/test_differ.py
+++ b/etc/ci/performance/test_differ.py
@@ -16,16 +16,16 @@ args = parser.parse_args()
def load_data(filename):
- with open(filename, 'r') as f:
+ with open(filename, "r") as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
- key = record.get('testcase')
- value = record.get('domComplete') - record.get('domLoading')
- totals[key] = totals.get('key', 0) + value
- counts[key] = counts.get('key', 0) + 1
+ key = record.get("testcase")
+ value = record.get("domComplete") - record.get("domLoading")
+ totals[key] = totals.get("key", 0) + value
+ counts[key] = counts.get("key", 0) + 1
results[key] = round(totals[key] / counts[key])
return results
@@ -34,10 +34,10 @@ data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
-BLUE = '\033[94m'
-GREEN = '\033[92m'
-WARNING = '\033[93m'
-END = '\033[0m'
+BLUE = "\033[94m"
+GREEN = "\033[92m"
+WARNING = "\033[93m"
+END = "\033[0m"
total1 = 0
diff --git a/etc/ci/performance/test_runner.py b/etc/ci/performance/test_runner.py
index 992fedab59e..c7773680449 100644
--- a/etc/ci/performance/test_runner.py
+++ b/etc/ci/performance/test_runner.py
@@ -10,7 +10,7 @@ import pytest
def test_log_parser():
mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
- mock_log = b'''
+ mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
@@ -36,38 +36,40 @@ def test_log_parser():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
-'''
-
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "navigationStart": 1460358376,
- "unloadEventStart": None,
- "unloadEventEnd": None,
- "redirectStart": None,
- "redirectEnd": None,
- "fetchStart": None,
- "domainLookupStart": None,
- "domainLookupEnd": None,
- "connectStart": None,
- "connectEnd": None,
- "secureConnectionStart": None,
- "requestStart": None,
- "responseStart": None,
- "responseEnd": None,
- "domLoading": 1460358376000,
- "domInteractive": 1460358388000,
- "domContentLoadedEventStart": 1460358388000,
- "domContentLoadedEventEnd": 1460358388000,
- "domComplete": 1460358389000,
- "loadEventStart": None,
- "loadEventEnd": None
- }]
+"""
+
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "navigationStart": 1460358376,
+ "unloadEventStart": None,
+ "unloadEventEnd": None,
+ "redirectStart": None,
+ "redirectEnd": None,
+ "fetchStart": None,
+ "domainLookupStart": None,
+ "domainLookupEnd": None,
+ "connectStart": None,
+ "connectEnd": None,
+ "secureConnectionStart": None,
+ "requestStart": None,
+ "responseStart": None,
+ "responseEnd": None,
+ "domLoading": 1460358376000,
+ "domInteractive": 1460358388000,
+ "domContentLoadedEventStart": 1460358388000,
+ "domContentLoadedEventEnd": 1460358388000,
+ "domComplete": 1460358389000,
+ "loadEventStart": None,
+ "loadEventEnd": None,
+ }
+ ]
result = runner.parse_log(mock_log, mock_url)
- assert (expected == list(result))
+ assert expected == list(result)
def test_log_parser_complex():
- mock_log = b'''
+ mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
@@ -119,38 +121,40 @@ Some other js error logs here
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
-'''
+"""
mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "navigationStart": 1460358376,
- "unloadEventStart": None,
- "unloadEventEnd": None,
- "redirectStart": None,
- "redirectEnd": None,
- "fetchStart": None,
- "domainLookupStart": None,
- "domainLookupEnd": None,
- "connectStart": None,
- "connectEnd": None,
- "secureConnectionStart": None,
- "requestStart": None,
- "responseStart": None,
- "responseEnd": None,
- "domLoading": 1460358376000,
- "domInteractive": 1460358388000,
- "domContentLoadedEventStart": 1460358388000,
- "domContentLoadedEventEnd": 1460358388000,
- "domComplete": 1460358389000,
- "loadEventStart": None,
- "loadEventEnd": None
- }]
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "navigationStart": 1460358376,
+ "unloadEventStart": None,
+ "unloadEventEnd": None,
+ "redirectStart": None,
+ "redirectEnd": None,
+ "fetchStart": None,
+ "domainLookupStart": None,
+ "domainLookupEnd": None,
+ "connectStart": None,
+ "connectEnd": None,
+ "secureConnectionStart": None,
+ "requestStart": None,
+ "responseStart": None,
+ "responseEnd": None,
+ "domLoading": 1460358376000,
+ "domInteractive": 1460358388000,
+ "domContentLoadedEventStart": 1460358388000,
+ "domContentLoadedEventEnd": 1460358388000,
+ "domComplete": 1460358389000,
+ "loadEventStart": None,
+ "loadEventEnd": None,
+ }
+ ]
result = runner.parse_log(mock_log, mock_url)
- assert (expected == list(result))
+ assert expected == list(result)
def test_log_parser_empty():
- mock_log = b'''
+ mock_log = b"""
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
@@ -158,75 +162,79 @@ def test_log_parser_empty():
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
-'''
+"""
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "title": "",
- "navigationStart": 0,
- "unloadEventStart": -1,
- "unloadEventEnd": -1,
- "redirectStart": -1,
- "redirectEnd": -1,
- "fetchStart": -1,
- "domainLookupStart": -1,
- "domainLookupEnd": -1,
- "connectStart": -1,
- "connectEnd": -1,
- "secureConnectionStart": -1,
- "requestStart": -1,
- "responseStart": -1,
- "responseEnd": -1,
- "domLoading": -1,
- "domInteractive": -1,
- "domContentLoadedEventStart": -1,
- "domContentLoadedEventEnd": -1,
- "domComplete": -1,
- "loadEventStart": -1,
- "loadEventEnd": -1
- }]
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "title": "",
+ "navigationStart": 0,
+ "unloadEventStart": -1,
+ "unloadEventEnd": -1,
+ "redirectStart": -1,
+ "redirectEnd": -1,
+ "fetchStart": -1,
+ "domainLookupStart": -1,
+ "domainLookupEnd": -1,
+ "connectStart": -1,
+ "connectEnd": -1,
+ "secureConnectionStart": -1,
+ "requestStart": -1,
+ "responseStart": -1,
+ "responseEnd": -1,
+ "domLoading": -1,
+ "domInteractive": -1,
+ "domContentLoadedEventStart": -1,
+ "domContentLoadedEventEnd": -1,
+ "domComplete": -1,
+ "loadEventStart": -1,
+ "loadEventEnd": -1,
+ }
+ ]
result = runner.parse_log(mock_log, mock_testcase)
- assert (expected == list(result))
+ assert expected == list(result)
def test_log_parser_error():
- mock_log = b'Nothing here! Test failed!'
+ mock_log = b"Nothing here! Test failed!"
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "title": "",
- "navigationStart": 0,
- "unloadEventStart": -1,
- "unloadEventEnd": -1,
- "redirectStart": -1,
- "redirectEnd": -1,
- "fetchStart": -1,
- "domainLookupStart": -1,
- "domainLookupEnd": -1,
- "connectStart": -1,
- "connectEnd": -1,
- "secureConnectionStart": -1,
- "requestStart": -1,
- "responseStart": -1,
- "responseEnd": -1,
- "domLoading": -1,
- "domInteractive": -1,
- "domContentLoadedEventStart": -1,
- "domContentLoadedEventEnd": -1,
- "domComplete": -1,
- "loadEventStart": -1,
- "loadEventEnd": -1
- }]
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "title": "",
+ "navigationStart": 0,
+ "unloadEventStart": -1,
+ "unloadEventEnd": -1,
+ "redirectStart": -1,
+ "redirectEnd": -1,
+ "fetchStart": -1,
+ "domainLookupStart": -1,
+ "domainLookupEnd": -1,
+ "connectStart": -1,
+ "connectEnd": -1,
+ "secureConnectionStart": -1,
+ "requestStart": -1,
+ "responseStart": -1,
+ "responseEnd": -1,
+ "domLoading": -1,
+ "domInteractive": -1,
+ "domContentLoadedEventStart": -1,
+ "domContentLoadedEventEnd": -1,
+ "domComplete": -1,
+ "loadEventStart": -1,
+ "loadEventEnd": -1,
+ }
+ ]
result = runner.parse_log(mock_log, mock_testcase)
- assert (expected == list(result))
+ assert expected == list(result)
def test_log_parser_bad_testcase_name():
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
# Notice the testcase is about:blank, servo crashed
- mock_log = b'''
+ mock_log = b"""
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
@@ -252,182 +260,196 @@ def test_log_parser_bad_testcase_name():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
-'''
-
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "title": "",
- "navigationStart": 0,
- "unloadEventStart": -1,
- "unloadEventEnd": -1,
- "redirectStart": -1,
- "redirectEnd": -1,
- "fetchStart": -1,
- "domainLookupStart": -1,
- "domainLookupEnd": -1,
- "connectStart": -1,
- "connectEnd": -1,
- "secureConnectionStart": -1,
- "requestStart": -1,
- "responseStart": -1,
- "responseEnd": -1,
- "domLoading": -1,
- "domInteractive": -1,
- "domContentLoadedEventStart": -1,
- "domContentLoadedEventEnd": -1,
- "domComplete": -1,
- "loadEventStart": -1,
- "loadEventEnd": -1
- }]
+"""
+
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "title": "",
+ "navigationStart": 0,
+ "unloadEventStart": -1,
+ "unloadEventEnd": -1,
+ "redirectStart": -1,
+ "redirectEnd": -1,
+ "fetchStart": -1,
+ "domainLookupStart": -1,
+ "domainLookupEnd": -1,
+ "connectStart": -1,
+ "connectEnd": -1,
+ "secureConnectionStart": -1,
+ "requestStart": -1,
+ "responseStart": -1,
+ "responseEnd": -1,
+ "domLoading": -1,
+ "domInteractive": -1,
+ "domContentLoadedEventStart": -1,
+ "domContentLoadedEventEnd": -1,
+ "domComplete": -1,
+ "loadEventStart": -1,
+ "loadEventEnd": -1,
+ }
+ ]
result = runner.parse_log(mock_log, mock_testcase)
- assert (expected == list(result))
+ assert expected == list(result)
def test_manifest_loader():
-
- text = '''
+ text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
# Disabled! http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
-'''
+"""
expected = [
("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", False),
- ("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False)
+ ("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False),
]
- assert (expected == list(runner.parse_manifest(text)))
+ assert expected == list(runner.parse_manifest(text))
def test_manifest_loader_async():
-
- text = '''
+ text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
async http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
-'''
+"""
expected = [
("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", True),
]
- assert (expected == list(runner.parse_manifest(text)))
+ assert expected == list(runner.parse_manifest(text))
def test_filter_result_by_manifest():
- input_json = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
- "domComplete": 1460358389000,
- }, {
- "testcase": "non-existing-html",
- "domComplete": 1460358389000,
- }, {
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389000,
- }]
-
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389000,
- }]
-
- manifest = [
- ("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)
+ input_json = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
+ "domComplete": 1460358389000,
+ },
+ {
+ "testcase": "non-existing-html",
+ "domComplete": 1460358389000,
+ },
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389000,
+ },
]
- assert (expected == runner.filter_result_by_manifest(input_json, manifest))
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389000,
+ }
+ ]
+
+ manifest = [("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)]
+
+ assert expected == runner.filter_result_by_manifest(input_json, manifest)
def test_filter_result_by_manifest_error():
- input_json = [{
- "testcase": "1.html",
- "domComplete": 1460358389000,
- }]
-
- manifest = [
- ("1.html", False),
- ("2.html", False)
+ input_json = [
+ {
+ "testcase": "1.html",
+ "domComplete": 1460358389000,
+ }
]
+ manifest = [("1.html", False), ("2.html", False)]
+
with pytest.raises(Exception) as execinfo:
runner.filter_result_by_manifest(input_json, manifest)
assert "Missing test result" in str(execinfo.value)
def test_take_result_median_odd():
- input_json = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389001,
- "domLoading": 1460358380002
- }, {
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389002,
- "domLoading": 1460358380001
- }, {
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389003,
- "domLoading": 1460358380003
- }]
-
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389002,
- "domLoading": 1460358380002
- }]
-
- assert (expected == runner.take_result_median(input_json, len(input_json)))
+ input_json = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389001,
+ "domLoading": 1460358380002,
+ },
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389002,
+ "domLoading": 1460358380001,
+ },
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389003,
+ "domLoading": 1460358380003,
+ },
+ ]
+
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389002,
+ "domLoading": 1460358380002,
+ }
+ ]
+
+ assert expected == runner.take_result_median(input_json, len(input_json))
def test_take_result_median_even():
- input_json = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389001,
- "domLoading": 1460358380002
- }, {
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389002,
- "domLoading": 1460358380001
- }]
+ input_json = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389001,
+ "domLoading": 1460358380002,
+ },
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389002,
+ "domLoading": 1460358380001,
+ },
+ ]
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389001.5,
- "domLoading": 1460358380001.5
- }]
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389001.5,
+ "domLoading": 1460358380001.5,
+ }
+ ]
- assert (expected == runner.take_result_median(input_json, len(input_json)))
+ assert expected == runner.take_result_median(input_json, len(input_json))
def test_take_result_median_error():
- input_json = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": None,
- "domLoading": 1460358380002
- }, {
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389002,
- "domLoading": 1460358380001
- }]
+ input_json = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": None,
+ "domLoading": 1460358380002,
+ },
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389002,
+ "domLoading": 1460358380001,
+ },
+ ]
- expected = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": 1460358389002,
- "domLoading": 1460358380001.5
- }]
+ expected = [
+ {
+ "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
+ "domComplete": 1460358389002,
+ "domLoading": 1460358380001.5,
+ }
+ ]
- assert (expected == runner.take_result_median(input_json, len(input_json)))
+ assert expected == runner.take_result_median(input_json, len(input_json))
def test_log_result():
- results = [{
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": -1
- }, {
- "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
- "domComplete": -1
- }, {
- "testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html",
- "domComplete": 123456789
- }]
+ results = [
+ {"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
+ {"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
+ {"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html", "domComplete": 123456789},
+ ]
expected = """
========================================
@@ -437,4 +459,4 @@ Failure summary:
- http://localhost:8000/page_load_test/56.com/www.56.com/index.html
========================================
"""
- assert (expected == runner.format_result_summary(results))
+ assert expected == runner.format_result_summary(results)
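Editor's note: the three median tests above pin down the expected behaviour of runner.take_result_median: for an odd number of samples the middle value of each timing field is kept, for an even number the two middle values are averaged (hence the .5 results), and entries whose field is None are simply dropped. A minimal sketch of that per-field median with statistics.median, assuming all entries share the same testcase — an illustration only, not the project's actual implementation:

    import statistics

    def take_result_median_sketch(results, num_runs):
        # num_runs mirrors the real call signature but is unused in this sketch.
        median_result = {"testcase": results[0]["testcase"]}
        for key in ("domComplete", "domLoading"):
            # Drop missing/None samples before taking the median.
            values = [r[key] for r in results if r.get(key) is not None]
            if values:
                median_result[key] = statistics.median(values)
        return [median_result]

Fed the even-numbered fixture above, this returns domComplete 1460358389001.5 and domLoading 1460358380001.5, matching the expected list.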
diff --git a/etc/ci/performance/test_submit_to_perfherder.py b/etc/ci/performance/test_submit_to_perfherder.py
index a6a89a11da0..bf58cc7d662 100644
--- a/etc/ci/performance/test_submit_to_perfherder.py
+++ b/etc/ci/performance/test_submit_to_perfherder.py
@@ -8,18 +8,18 @@ import submit_to_perfherder
def test_format_testcase_name():
- assert ('about:blank' == submit_to_perfherder.format_testcase_name(
- 'about:blank'))
- assert ('163.com' == submit_to_perfherder.format_testcase_name((
- 'http://localhost:8000/page_load_test/163.com/p.mail.163.com/'
- 'mailinfo/shownewmsg_www_1222.htm.html')))
- assert (('1234567890223456789032345678904234567890'
- '5234567890623456789072345678908234567890')
- == submit_to_perfherder.format_testcase_name((
- '1234567890223456789032345678904234567890'
- '52345678906234567890723456789082345678909234567890')))
- assert ('news.ycombinator.com' == submit_to_perfherder.format_testcase_name(
- 'http://localhost:8000/tp6/news.ycombinator.com/index.html'))
+ assert "about:blank" == submit_to_perfherder.format_testcase_name("about:blank")
+ assert "163.com" == submit_to_perfherder.format_testcase_name(
+ ("http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html")
+ )
+ assert (
+ "12345678902234567890323456789042345678905234567890623456789072345678908234567890"
+ ) == submit_to_perfherder.format_testcase_name(
+ ("123456789022345678903234567890423456789052345678906234567890723456789082345678909234567890")
+ )
+ assert "news.ycombinator.com" == submit_to_perfherder.format_testcase_name(
+ "http://localhost:8000/tp6/news.ycombinator.com/index.html"
+ )
def test_format_perf_data():
@@ -46,7 +46,7 @@ def test_format_perf_data():
"unloadEventEnd": None,
"responseEnd": None,
"testcase": "about:blank",
- "domComplete": 1460444931000
+ "domComplete": 1460444931000,
},
{
"unloadEventStart": None,
@@ -69,11 +69,11 @@ def test_format_perf_data():
"domainLookupEnd": None,
"unloadEventEnd": None,
"responseEnd": None,
- "testcase": ("http://localhost:8000/page_load_test/163.com/"
- "p.mail.163.com/mailinfo/"
- "shownewmsg_www_1222.htm.html"),
- "domComplete": 1460444948000
- }
+ "testcase": (
+ "http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
+ ),
+ "domComplete": 1460444948000,
+ },
]
expected = {
@@ -84,33 +84,27 @@ def test_format_perf_data():
"name": "domComplete",
"value": 3741.657386773941,
"subtests": [
- {"name": "about:blank",
- "value": 1000},
- {"name": "163.com",
- "value": 14000},
- ]
+ {"name": "about:blank", "value": 1000},
+ {"name": "163.com", "value": 14000},
+ ],
}
- ]
+ ],
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
- assert (expected == result)
+ assert expected == result
def test_format_bad_perf_data():
mock_result = [
- {
- "navigationStart": 1460444930000,
- "testcase": "about:blank",
- "domComplete": 0
- },
+ {"navigationStart": 1460444930000, "testcase": "about:blank", "domComplete": 0},
{
"navigationStart": 1460444934000,
- "testcase": ("http://localhost:8000/page_load_test/163.com/"
- "p.mail.163.com/mailinfo/"
- "shownewmsg_www_1222.htm.html"),
- "domComplete": 1460444948000
- }
+ "testcase": (
+ "http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
+ ),
+ "domComplete": 1460444948000,
+ },
]
expected = {
@@ -121,14 +115,12 @@ def test_format_bad_perf_data():
"name": "domComplete",
"value": 14000.0,
"subtests": [
- {"name": "about:blank",
- "value": -1}, # Timeout
- {"name": "163.com",
- "value": 14000},
- ]
+ {"name": "about:blank", "value": -1}, # Timeout
+ {"name": "163.com", "value": 14000},
+ ],
}
- ]
+ ],
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
- assert (expected == result)
+ assert expected == result
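Editor's note: the suite-level values asserted in these fixtures (3741.657386773941 for subtests of 1000 and 14000, and 14000.0 once the timed-out subtest reported as -1 is left out) are consistent with a geometric mean over the valid subtest values. A small worked check under that assumption; suite_value is a name of our own, not the script's API:

    import math

    def suite_value(subtest_values):
        # Timed-out subtests (reported as -1) are assumed to be excluded.
        valid = [v for v in subtest_values if v > 0]
        # Geometric mean of the remaining values.
        return math.prod(valid) ** (1 / len(valid))

    print(suite_value([1000, 14000]))  # ~3741.657386773941, the suite value above
    print(suite_value([-1, 14000]))    # 14000.0, as in the bad-data fixture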
diff --git a/etc/ci/report_aggregated_expected_results.py b/etc/ci/report_aggregated_expected_results.py
index fbfe763d74c..1fca5c7eb81 100755
--- a/etc/ci/report_aggregated_expected_results.py
+++ b/etc/ci/report_aggregated_expected_results.py
@@ -37,7 +37,7 @@ class Item:
def from_result(cls, result: dict, title: Optional[str] = None, print_stack=True):
expected = result["expected"]
actual = result["actual"]
- title = title if title else f'`{result["path"]}`'
+ title = title if title else f"`{result['path']}`"
if expected != actual:
title = f"{actual} [expected {expected}] {title}"
else:
@@ -45,8 +45,7 @@ class Item:
issue_url = "http://github.com/servo/servo/issues/"
if "issues" in result and result["issues"]:
- issues = ", ".join([f"[#{issue}]({issue_url}{issue})"
- for issue in result["issues"]])
+ issues = ", ".join([f"[#{issue}]({issue_url}{issue})" for issue in result["issues"]])
title += f" ({issues})"
stack = result["stack"] if result["stack"] and print_stack else ""
@@ -59,8 +58,9 @@ class Item:
cls.from_result(
subtest_result,
f"subtest: `{subtest_result['subtest']}`"
- + (f" \n```\n{subtest_result['message']}\n```\n" if subtest_result['message'] else ""),
- False)
+ + (f" \n```\n{subtest_result['message']}\n```\n" if subtest_result["message"] else ""),
+ False,
+ )
for subtest_result in subtest_results
]
return cls(title, body, children)
@@ -68,10 +68,8 @@ class Item:
def to_string(self, bullet: str = "", indent: str = ""):
output = f"{indent}{bullet}{self.title}\n"
if self.body:
- output += textwrap.indent(f"{self.body}\n",
- " " * len(indent + bullet))
- output += "\n".join([child.to_string("• ", indent + " ")
- for child in self.children])
+ output += textwrap.indent(f"{self.body}\n", " " * len(indent + bullet))
+ output += "\n".join([child.to_string("• ", indent + " ") for child in self.children])
return output.rstrip().replace("`", "")
def to_html(self, level: int = 0) -> ElementTree.Element:
@@ -88,17 +86,13 @@ class Item:
if self.children:
# Some tests have dozens of failing tests, which overwhelm the
# output. Limit the output for subtests in GitHub comment output.
- max_children = len(
- self.children) if level < 2 else SUBTEST_RESULT_TRUNCATION
+ max_children = len(self.children) if level < 2 else SUBTEST_RESULT_TRUNCATION
if len(self.children) > max_children:
children = self.children[:max_children]
- children.append(Item(
- f"And {len(self.children) - max_children} more unexpected results...",
- "", []))
+ children.append(Item(f"And {len(self.children) - max_children} more unexpected results...", "", []))
else:
children = self.children
- container = ElementTree.SubElement(
- result, "div" if not level else "ul")
+ container = ElementTree.SubElement(result, "div" if not level else "ul")
for child in children:
container.append(child.to_html(level + 1))
@@ -125,17 +119,16 @@ def get_results(filenames: list[str], tag: str = "") -> Optional[Item]:
return not is_flaky(result) and not result["issues"]
def add_children(children: List[Item], results: List[dict], filter_func, text):
- filtered = [Item.from_result(result) for result in
- filter(filter_func, results)]
+ filtered = [Item.from_result(result) for result in filter(filter_func, results)]
if filtered:
children.append(Item(f"{text} ({len(filtered)})", "", filtered))
children: List[Item] = []
add_children(children, unexpected, is_flaky, "Flaky unexpected result")
- add_children(children, unexpected, is_stable_and_known,
- "Stable unexpected results that are known to be intermittent")
- add_children(children, unexpected, is_stable_and_unexpected,
- "Stable unexpected results")
+ add_children(
+ children, unexpected, is_stable_and_known, "Stable unexpected results that are known to be intermittent"
+ )
+ add_children(children, unexpected, is_stable_and_unexpected, "Stable unexpected results")
run_url = get_github_run_url()
text = "Test results"
@@ -154,8 +147,8 @@ def get_github_run_url() -> Optional[str]:
return None
if "run_id" not in github_context:
return None
- repository = github_context['repository']
- run_id = github_context['run_id']
+ repository = github_context["repository"]
+ run_id = github_context["run_id"]
return f"[#{run_id}](https://github.com/{repository}/actions/runs/{run_id})"
@@ -197,14 +190,14 @@ def create_github_reports(body: str, tag: str = ""):
# This process is based on the documentation here:
# https://docs.github.com/en/rest/checks/runs?apiVersion=2022-11-28#create-a-check-runs
results = json.loads(os.environ.get("RESULTS", "{}"))
- if all(r == 'success' for r in results):
- conclusion = 'success'
+ if all(r == "success" for r in results):
+ conclusion = "success"
elif "failure" in results:
- conclusion = 'failure'
+ conclusion = "failure"
elif "cancelled" in results:
- conclusion = 'cancelled'
+ conclusion = "cancelled"
else:
- conclusion = 'neutral'
+ conclusion = "neutral"
github_token = os.environ.get("GITHUB_TOKEN")
github_context = json.loads(os.environ.get("GITHUB_CONTEXT", "{}"))
@@ -214,34 +207,42 @@ def create_github_reports(body: str, tag: str = ""):
return None
repo = github_context["repository"]
data = {
- 'name': tag,
- 'head_sha': github_context["sha"],
- 'status': 'completed',
- 'started_at': datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
- 'conclusion': conclusion,
- 'completed_at': datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
- 'output': {
- 'title': f'Aggregated {tag} report',
- 'summary': body,
- 'images': [{'alt': 'WPT logo', 'image_url': 'https://avatars.githubusercontent.com/u/37226233'}]
+ "name": tag,
+ "head_sha": github_context["sha"],
+ "status": "completed",
+ "started_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
+ "conclusion": conclusion,
+ "completed_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
+ "output": {
+ "title": f"Aggregated {tag} report",
+ "summary": body,
+ "images": [{"alt": "WPT logo", "image_url": "https://avatars.githubusercontent.com/u/37226233"}],
},
- 'actions': [
- ]
+ "actions": [],
}
- subprocess.Popen(["curl", "-L",
- "-X", "POST",
- "-H", "Accept: application/vnd.github+json",
- "-H", f"Authorization: Bearer {github_token}",
- "-H", "X-GitHub-Api-Version: 2022-11-28",
- f"https://api.github.com/repos/{repo}/check-runs",
- "-d", json.dumps(data)]).wait()
+ subprocess.Popen(
+ [
+ "curl",
+ "-L",
+ "-X",
+ "POST",
+ "-H",
+ "Accept: application/vnd.github+json",
+ "-H",
+ f"Authorization: Bearer {github_token}",
+ "-H",
+ "X-GitHub-Api-Version: 2022-11-28",
+ f"https://api.github.com/repos/{repo}/check-runs",
+ "-d",
+ json.dumps(data),
+ ]
+ ).wait()
def main():
parser = argparse.ArgumentParser()
- parser.add_argument("--tag", default="wpt", action="store",
- help="A string tag used to distinguish the results.")
+ parser.add_argument("--tag", default="wpt", action="store", help="A string tag used to distinguish the results.")
args, filenames = parser.parse_known_args()
results = get_results(filenames, args.tag)
if not results:
@@ -251,14 +252,12 @@ def main():
print(results.to_string())
- html_string = ElementTree.tostring(
- results.to_html(), encoding="unicode")
+ html_string = ElementTree.tostring(results.to_html(), encoding="unicode")
create_github_reports(html_string, args.tag)
pr_number = get_pr_number()
if pr_number:
- process = subprocess.Popen(
- ['gh', 'pr', 'comment', pr_number, '-F', '-'], stdin=subprocess.PIPE)
+ process = subprocess.Popen(["gh", "pr", "comment", pr_number, "-F", "-"], stdin=subprocess.PIPE)
print(process.communicate(input=html_string.encode("utf-8"))[0])
else:
print("Could not find PR number in environment. Not making GitHub comment.")
diff --git a/etc/crates-graph.py b/etc/crates-graph.py
index 7970c94119e..06800013f6d 100755
--- a/etc/crates-graph.py
+++ b/etc/crates-graph.py
@@ -35,6 +35,7 @@ def main(crate=None):
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
+
traverse(crate)
else:
filtered = graph
diff --git a/etc/devtools_parser.py b/etc/devtools_parser.py
index 9455ba39ead..bc0f29c7eb9 100755
--- a/etc/devtools_parser.py
+++ b/etc/devtools_parser.py
@@ -42,12 +42,15 @@ import signal
import sys
from argparse import ArgumentParser
from subprocess import Popen, PIPE
+
try:
from termcolor import colored
except ImportError:
+
def colored(text, *args, **kwargs):
return text
+
fields = ["frame.time", "tcp.srcport", "tcp.payload"]
@@ -57,10 +60,14 @@ def record_data(file, port):
# Create tshark command
cmd = [
"tshark",
- "-T", "fields",
- "-i", "lo",
- "-d", f"tcp.port=={port},http",
- "-w", file,
+ "-T",
+ "fields",
+ "-i",
+ "lo",
+ "-d",
+ f"tcp.port=={port},http",
+ "-w",
+ file,
] + [e for f in fields for e in ("-e", f)]
process = Popen(cmd, stdout=PIPE)
@@ -84,8 +91,10 @@ def read_data(file):
# Create tshark command
cmd = [
"tshark",
- "-T", "fields",
- "-r", file,
+ "-T",
+ "fields",
+ "-r",
+ file,
] + [e for f in fields for e in ("-e", f)]
process = Popen(cmd, stdout=PIPE)
@@ -182,7 +191,7 @@ def parse_message(msg, *, json_output=False):
time, sender, i, data = msg
from_servo = sender == "Servo"
- colored_sender = colored(sender, 'black', 'on_yellow' if from_servo else 'on_magenta', attrs=['bold'])
+ colored_sender = colored(sender, "black", "on_yellow" if from_servo else "on_magenta", attrs=["bold"])
if not json_output:
print(f"\n{colored_sender} - {colored(i, 'blue')} - {colored(time, 'dark_grey')}")
@@ -199,7 +208,7 @@ def parse_message(msg, *, json_output=False):
assert False, "Message is neither a request nor a response"
else:
if from_servo and "from" in content:
- print(colored(f"Actor: {content['from']}", 'yellow'))
+ print(colored(f"Actor: {content['from']}", "yellow"))
print(json.dumps(content, sort_keys=True, indent=4))
except json.JSONDecodeError:
print(f"Warning: Couldn't decode json\n{data}")
@@ -236,7 +245,7 @@ if __name__ == "__main__":
if args.range and len(args.range.split(":")) == 2:
min, max = args.range.split(":")
- for msg in data[int(min):int(max) + 1]:
+ for msg in data[int(min) : int(max) + 1]:
# Filter the messages if specified
if not args.filter or args.filter.lower() in msg[3].lower():
parse_message(msg, json_output=args.json)
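Editor's note: both tshark invocations above append one -e flag per capture field through a flattening comprehension. A quick illustration of what that expansion produces for the fields list defined at the top of the script:

    # Sketch of the `-e` expansion used when building the tshark command lines.
    fields = ["frame.time", "tcp.srcport", "tcp.payload"]
    extra_args = [e for f in fields for e in ("-e", f)]
    print(extra_args)
    # ['-e', 'frame.time', '-e', 'tcp.srcport', '-e', 'tcp.payload']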
diff --git a/etc/memory_reports_over_time.py b/etc/memory_reports_over_time.py
index 9c28574b922..e7bef870564 100755
--- a/etc/memory_reports_over_time.py
+++ b/etc/memory_reports_over_time.py
@@ -21,14 +21,14 @@ def extract_memory_reports(lines):
report_lines = []
times = []
for line in lines:
- if line.startswith('Begin memory reports'):
+ if line.startswith("Begin memory reports"):
in_report = True
report_lines += [[]]
times += [line.strip().split()[-1]]
- elif line == 'End memory reports\n':
+ elif line == "End memory reports\n":
in_report = False
elif in_report:
- if line.startswith('|'):
+ if line.startswith("|"):
report_lines[-1].append(line.strip())
return (report_lines, times)
@@ -38,11 +38,11 @@ def parse_memory_report(lines):
parents = []
last_separator_index = None
for line in lines:
- assert (line[0] == '|')
+ assert line[0] == "|"
line = line[1:]
if not line:
continue
- separator_index = line.index('--')
+ separator_index = line.index("--")
if last_separator_index and separator_index <= last_separator_index:
while parents and parents[-1][1] >= separator_index:
parents.pop()
@@ -50,13 +50,9 @@ def parse_memory_report(lines):
amount, unit, _, name = line.split()
dest_report = reports
- for (parent, index) in parents:
- dest_report = dest_report[parent]['children']
- dest_report[name] = {
- 'amount': amount,
- 'unit': unit,
- 'children': {}
- }
+ for parent, index in parents:
+ dest_report = dest_report[parent]["children"]
+ dest_report[name] = {"amount": amount, "unit": unit, "children": {}}
parents += [(name, separator_index)]
last_separator_index = separator_index
@@ -68,24 +64,26 @@ def transform_report_for_test(report):
remaining = list(report.items())
while remaining:
(name, value) = remaining.pop()
- transformed[name] = '%s %s' % (value['amount'], value['unit'])
- remaining += map(lambda k_v: (name + '/' + k_v[0], k_v[1]), list(value['children'].items()))
+ transformed[name] = "%s %s" % (value["amount"], value["unit"])
+ remaining += map(lambda k_v: (name + "/" + k_v[0], k_v[1]), list(value["children"].items()))
return transformed
def test_extract_memory_reports():
- input = ["Begin memory reports",
- "|",
- " 154.56 MiB -- explicit\n",
- "| 107.88 MiB -- system-heap-unclassified\n",
- "End memory reports\n"]
- expected = ([['|', '| 107.88 MiB -- system-heap-unclassified']], ['reports'])
- assert (extract_memory_reports(input) == expected)
+ input = [
+ "Begin memory reports",
+ "|",
+ " 154.56 MiB -- explicit\n",
+ "| 107.88 MiB -- system-heap-unclassified\n",
+ "End memory reports\n",
+ ]
+ expected = ([["|", "| 107.88 MiB -- system-heap-unclassified"]], ["reports"])
+ assert extract_memory_reports(input) == expected
return 0
def test():
- input = '''|
+ input = """|
| 23.89 MiB -- explicit
| 21.35 MiB -- jemalloc-heap-unclassified
| 2.54 MiB -- url(https://servo.org/)
@@ -97,33 +95,33 @@ def test():
| 0.27 MiB -- stylist
| 0.12 MiB -- dom-tree
|
-| 25.18 MiB -- jemalloc-heap-active'''
+| 25.18 MiB -- jemalloc-heap-active"""
expected = {
- 'explicit': '23.89 MiB',
- 'explicit/jemalloc-heap-unclassified': '21.35 MiB',
- 'explicit/url(https://servo.org/)': '2.54 MiB',
- 'explicit/url(https://servo.org/)/js': '2.16 MiB',
- 'explicit/url(https://servo.org/)/js/gc-heap': '1.00 MiB',
- 'explicit/url(https://servo.org/)/js/gc-heap/decommitted': '0.77 MiB',
- 'explicit/url(https://servo.org/)/js/non-heap': '1.00 MiB',
- 'explicit/url(https://servo.org/)/layout-thread': '0.27 MiB',
- 'explicit/url(https://servo.org/)/layout-thread/stylist': '0.27 MiB',
- 'explicit/url(https://servo.org/)/dom-tree': '0.12 MiB',
- 'jemalloc-heap-active': '25.18 MiB',
+ "explicit": "23.89 MiB",
+ "explicit/jemalloc-heap-unclassified": "21.35 MiB",
+ "explicit/url(https://servo.org/)": "2.54 MiB",
+ "explicit/url(https://servo.org/)/js": "2.16 MiB",
+ "explicit/url(https://servo.org/)/js/gc-heap": "1.00 MiB",
+ "explicit/url(https://servo.org/)/js/gc-heap/decommitted": "0.77 MiB",
+ "explicit/url(https://servo.org/)/js/non-heap": "1.00 MiB",
+ "explicit/url(https://servo.org/)/layout-thread": "0.27 MiB",
+ "explicit/url(https://servo.org/)/layout-thread/stylist": "0.27 MiB",
+ "explicit/url(https://servo.org/)/dom-tree": "0.12 MiB",
+ "jemalloc-heap-active": "25.18 MiB",
}
- report = parse_memory_report(input.split('\n'))
+ report = parse_memory_report(input.split("\n"))
transformed = transform_report_for_test(report)
- assert (sorted(transformed.keys()) == sorted(expected.keys()))
+ assert sorted(transformed.keys()) == sorted(expected.keys())
for k, v in transformed.items():
- assert (v == expected[k])
+ assert v == expected[k]
test_extract_memory_reports()
return 0
def usage():
- print('%s --test - run automated tests' % sys.argv[0])
- print('%s file - extract all memory reports that are present in file' % sys.argv[0])
+ print("%s --test - run automated tests" % sys.argv[0])
+ print("%s file - extract all memory reports that are present in file" % sys.argv[0])
return 1
@@ -131,19 +129,19 @@ if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(usage())
- if sys.argv[1] == '--test':
+ if sys.argv[1] == "--test":
sys.exit(test())
with open(sys.argv[1]) as f:
lines = f.readlines()
(reports, times) = extract_memory_reports(lines)
json_reports = []
- for (report_lines, seconds) in zip(reports, times):
+ for report_lines, seconds in zip(reports, times):
report = parse_memory_report(report_lines)
- json_reports += [{'seconds': seconds, 'report': report}]
+ json_reports += [{"seconds": seconds, "report": report}]
with tempfile.NamedTemporaryFile(delete=False) as output:
thisdir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(thisdir, 'memory_chart.html')) as template:
+ with open(os.path.join(thisdir, "memory_chart.html")) as template:
content = template.read()
- output.write(content.replace('[/* json data */]', json.dumps(json_reports)))
- webbrowser.open_new_tab('file://' + output.name)
+ output.write(content.replace("[/* json data */]", json.dumps(json_reports)))
+ webbrowser.open_new_tab("file://" + output.name)
diff --git a/etc/patch-trace-template.py b/etc/patch-trace-template.py
index 84400c8e02e..270630bc863 100755
--- a/etc/patch-trace-template.py
+++ b/etc/patch-trace-template.py
@@ -31,7 +31,7 @@ Example:
""")
sys.exit(0)
-rust_source = open(sys.argv[1], 'r')
+rust_source = open(sys.argv[1], "r")
lines = iter(rust_source)
for line in lines:
if line.lstrip().startswith("pub enum ProfilerCategory"):
@@ -53,21 +53,21 @@ plist = ElementTree.ElementTree(ElementTree.fromstring(xml))
elems = iter(plist.findall("./dict/*"))
for elem in elems:
- if elem.tag != 'key' or elem.text != '$objects':
+ if elem.tag != "key" or elem.text != "$objects":
continue
array = elems.next()
break
elems = iter(array.findall("./*"))
for elem in elems:
- if elem.tag != 'string' or elem.text != 'kdebugIntervalRule':
+ if elem.tag != "string" or elem.text != "kdebugIntervalRule":
continue
dictionary = elems.next()
break
elems = iter(dictionary.findall("./*"))
for elem in elems:
- if elem.tag != 'key' or elem.text != 'NS.objects':
+ if elem.tag != "key" or elem.text != "NS.objects":
continue
objects_array = elems.next()
break
@@ -76,33 +76,33 @@ child_count = sum(1 for _ in iter(array.findall("./*")))
for code_pair in code_pairs:
number_index = child_count
- integer = Element('integer')
+ integer = Element("integer")
integer.text = str(int(code_pair[0], 0))
array.append(integer)
child_count += 1
string_index = child_count
- string = Element('string')
+ string = Element("string")
string.text = code_pair[1]
array.append(string)
child_count += 1
- dictionary = Element('dict')
- key = Element('key')
+ dictionary = Element("dict")
+ key = Element("key")
key.text = "CF$UID"
dictionary.append(key)
- integer = Element('integer')
+ integer = Element("integer")
integer.text = str(number_index)
dictionary.append(integer)
objects_array.append(dictionary)
- dictionary = Element('dict')
- key = Element('key')
+ dictionary = Element("dict")
+ key = Element("key")
key.text = "CF$UID"
dictionary.append(key)
- integer = Element('integer')
+ integer = Element("integer")
integer.text = str(string_index)
dictionary.append(integer)
objects_array.append(dictionary)
-plist.write(sys.stdout, encoding='utf-8', xml_declaration=True)
+plist.write(sys.stdout, encoding="utf-8", xml_declaration=True)
diff --git a/etc/profilicate.py b/etc/profilicate.py
index 3fa35b5ba77..ca87b9fe04d 100644
--- a/etc/profilicate.py
+++ b/etc/profilicate.py
@@ -53,17 +53,17 @@ stacks = {}
thread_data = defaultdict(list)
thread_order = {}
for sample in samples:
- if sample['name']:
- name = sample['name']
+ if sample["name"]:
+ name = sample["name"]
else:
- name = "%s %d %d" % (sample['type'], sample['namespace'], sample['index'])
- thread_data[name].append((sample['time'], sample['frames']))
+ name = "%s %d %d" % (sample["type"], sample["namespace"], sample["index"])
+ thread_data[name].append((sample["time"], sample["frames"]))
if name not in thread_order:
- thread_order[name] = (sample['namespace'], sample['index'])
+ thread_order[name] = (sample["namespace"], sample["index"])
tid = 0
threads = []
-for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: thread_order[x[0]]):
+for name, raw_samples in sorted(iter(thread_data.items()), key=lambda x: thread_order[x[0]]):
string_table = StringTable()
tid += 1
@@ -77,13 +77,13 @@ for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: threa
for sample in raw_samples:
prefix = None
for frame in sample[1]:
- if not frame['name']:
+ if not frame["name"]:
continue
- if not frame['name'] in frameMap:
- frameMap[frame['name']] = len(frames)
- frame_index = string_table.get(frame['name'])
+ if frame["name"] not in frameMap:
+ frameMap[frame["name"]] = len(frames)
+ frame_index = string_table.get(frame["name"])
frames.append([frame_index])
- frame = frameMap[frame['name']]
+ frame = frameMap[frame["name"]]
stack_key = "%d,%d" % (frame, prefix) if prefix else str(frame)
if stack_key not in stackMap:
@@ -93,61 +93,63 @@ for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: threa
prefix = stack
samples.append([stack, sample[0]])
- threads.append({
- 'tid': tid,
- 'name': name,
- 'markers': {
- 'schema': {
- 'name': 0,
- 'time': 1,
- 'data': 2,
+ threads.append(
+ {
+ "tid": tid,
+ "name": name,
+ "markers": {
+ "schema": {
+ "name": 0,
+ "time": 1,
+ "data": 2,
+ },
+ "data": [],
},
- 'data': [],
- },
- 'samples': {
- 'schema': {
- 'stack': 0,
- 'time': 1,
- 'responsiveness': 2,
- 'rss': 2,
- 'uss': 4,
- 'frameNumber': 5,
+ "samples": {
+ "schema": {
+ "stack": 0,
+ "time": 1,
+ "responsiveness": 2,
+ "rss": 2,
+ "uss": 4,
+ "frameNumber": 5,
+ },
+ "data": samples,
},
- 'data': samples,
- },
- 'frameTable': {
- 'schema': {
- 'location': 0,
- 'implementation': 1,
- 'optimizations': 2,
- 'line': 3,
- 'category': 4,
+ "frameTable": {
+ "schema": {
+ "location": 0,
+ "implementation": 1,
+ "optimizations": 2,
+ "line": 3,
+ "category": 4,
+ },
+ "data": frames,
},
- 'data': frames,
- },
- 'stackTable': {
- 'schema': {
- 'frame': 0,
- 'prefix': 1,
+ "stackTable": {
+ "schema": {
+ "frame": 0,
+ "prefix": 1,
+ },
+ "data": stacks,
},
- 'data': stacks,
- },
- 'stringTable': string_table.contents(),
- })
+ "stringTable": string_table.contents(),
+ }
+ )
output = {
- 'meta': {
- 'interval': rate,
- 'processType': 0,
- 'product': 'Servo',
- 'stackwalk': 1,
- 'startTime': startTime,
- 'version': 4,
- 'presymbolicated': True,
+ "meta": {
+ "interval": rate,
+ "processType": 0,
+ "product": "Servo",
+ "stackwalk": 1,
+ "startTime": startTime,
+ "version": 4,
+ "presymbolicated": True,
},
- 'libs': [],
- 'threads': threads,
+ "libs": [],
+ "threads": threads,
}
print(json.dumps(output))
diff --git a/etc/run_in_headless_android_emulator.py b/etc/run_in_headless_android_emulator.py
index e25d6884be5..15ee3b87bc5 100755
--- a/etc/run_in_headless_android_emulator.py
+++ b/etc/run_in_headless_android_emulator.py
@@ -27,8 +27,10 @@ def main(avd_name, apk_path, *args):
"-no-window",
"-no-snapshot",
"-no-snapstorage",
- "-gpu", "guest",
- "-port", emulator_port,
+ "-gpu",
+ "guest",
+ "-port",
+ emulator_port,
]
with terminate_on_exit(emulator_args, stdout=sys.stderr) as emulator_process:
# This is hopefully enough time for the emulator to exit
@@ -70,7 +72,6 @@ def main(avd_name, apk_path, *args):
"*:S", # Hide everything else
]
with terminate_on_exit(adb + ["logcat"] + logcat_args) as logcat:
-
# This step needs to happen after application start
forward_webdriver(adb, args)
@@ -84,8 +85,7 @@ def tool_path(directory, bin_name):
if os.path.exists(path):
return path
- path = os.path.join(os.path.dirname(__file__), "..", "android-toolchains", "sdk",
- directory, bin_name)
+ path = os.path.join(os.path.dirname(__file__), "..", "android-toolchains", "sdk", directory, bin_name)
if os.path.exists(path):
return path
@@ -207,8 +207,7 @@ def interrupt(_signum, _frame):
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: %s avd_name apk_path [servo args...]" % sys.argv[0])
- print("Example: %s servo-x86 target/i686-linux-android/release/servo.apk https://servo.org"
- % sys.argv[0])
+ print("Example: %s servo-x86 target/i686-linux-android/release/servo.apk https://servo.org" % sys.argv[0])
sys.exit(1)
try:
diff --git a/etc/servo_automation_screenshot.py b/etc/servo_automation_screenshot.py
index 744d2309853..aa0f97bdaf5 100644
--- a/etc/servo_automation_screenshot.py
+++ b/etc/servo_automation_screenshot.py
@@ -29,16 +29,16 @@ import getopt
def print_help():
-
- print('\nPlease enter the command as shown below: \n')
- print('python3 ./etc/servo_automation_screenshot.py -p <port>'
- + ' -i /path/to/folder/containing/files -r <resolution>'
- + ' -n num_of_files\n')
+ print("\nPlease enter the command as shown below: \n")
+ print(
+ "python3 ./etc/servo_automation_screenshot.py -p <port>"
+ + " -i /path/to/folder/containing/files -r <resolution>"
+ + " -n num_of_files\n"
+ )
def servo_ready_to_accept(url, payload, headers):
- while (True):
-
+ while True:
try:
# Before sending an additional request, we wait for one second each time
time.sleep(1)
@@ -48,45 +48,46 @@ def servo_ready_to_accept(url, payload, headers):
break
except Exception as e:
time.sleep(5)
- print('Exception: ', e)
+ print("Exception: ", e)
return json_string
def ensure_screenshots_directory_exists():
- if not os.path.exists('screenshots'):
- os.makedirs('screenshots')
+ if not os.path.exists("screenshots"):
+ os.makedirs("screenshots")
def render_html_files(num_of_files, url, file_url, json_string, headers, cwd):
for x in range(num_of_files):
-
json_data = {}
- json_data['url'] = 'file://{0}file{1}.html'.format(file_url, str(x))
- print(json_data['url'])
+ json_data["url"] = "file://{0}file{1}.html".format(file_url, str(x))
+ print(json_data["url"])
json_data = json.dumps(json_data)
- requests.post('{0}/{1}/url'.format(url, json_string['value']['sessionId']), data=json_data, headers=headers)
- screenshot_request = requests.get('{0}/{1}/screenshot'.format(url, json_string['value']['sessionId']))
- image_data_encoded = screenshot_request.json()['value']
+ requests.post("{0}/{1}/url".format(url, json_string["value"]["sessionId"]), data=json_data, headers=headers)
+ screenshot_request = requests.get("{0}/{1}/screenshot".format(url, json_string["value"]["sessionId"]))
+ image_data_encoded = screenshot_request.json()["value"]
with open("screenshots/output_image_{0}.png".format(str(x)), "wb") as image_file:
- image_file.write(base64.decodebytes(image_data_encoded.encode('utf-8')))
+ image_file.write(base64.decodebytes(image_data_encoded.encode("utf-8")))
print("################################")
- print("The screenshot is stored in the location: {0}/screenshots/"
- " with filename: output_image_{1}.png".format(cwd, str(x)))
+ print(
+ "The screenshot is stored in the location: {0}/screenshots/ with filename: output_image_{1}.png".format(
+ cwd, str(x)
+ )
+ )
print("################################")
def main(argv): # take inputs from command line by considering the options parameter i.e -h, -p etc.
-
# Local Variables
- port = ''
- resolution = ''
- file_url = ''
- num_of_files = ''
+ port = ""
+ resolution = ""
+ file_url = ""
+ num_of_files = ""
cwd = os.getcwd()
- url = ''
+ url = ""
payload = "{}"
- headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
- json_string = ''
+ headers = {"content-type": "application/json", "Accept-Charset": "UTF-8"}
+ json_string = ""
try:
# input options defined here.
opts, args = getopt.getopt(argv, "p:i:r:n:", ["port=", "ifile=", "resolution=", "num-files="])
@@ -96,7 +97,7 @@ def main(argv): # take inputs from command line by considering the options para
print_help()
sys.exit(2)
for opt, arg in opts:
- if opt == '-h': # -h means help. Displays how to input command line arguments
+ if opt == "-h": # -h means help. Displays how to input command line arguments
print_help()
sys.exit()
elif opt in ("-p", "--port"): # store the value provided with the option -p in port variable.
@@ -108,7 +109,7 @@ def main(argv): # take inputs from command line by considering the options para
elif opt in ("-n", "--num-files"): # store the value provided with the option -n in num_of_files variable.
num_of_files = arg
- url = 'http://localhost:{0}/session'.format(port)
+ url = "http://localhost:{0}/session".format(port)
num_of_files = int(num_of_files)
# Starting servo on specified port
diff --git a/etc/servo_gdb.py b/etc/servo_gdb.py
index 0d535b1b279..7dfa1f4ef97 100644
--- a/etc/servo_gdb.py
+++ b/etc/servo_gdb.py
@@ -68,7 +68,7 @@ class TrustedNodeAddressPrinter:
def children(self):
node_type = gdb.lookup_type("struct script::dom::node::Node").pointer()
value = self.val.cast(node_type)
- return [('Node', value)]
+ return [("Node", value)]
def to_string(self):
return self.val.address
@@ -83,7 +83,7 @@ class NodeTypeIdPrinter:
u8_ptr_type = gdb.lookup_type("u8").pointer()
enum_0 = self.val.address.cast(u8_ptr_type).dereference()
enum_type = self.val.type.fields()[int(enum_0)].type
- return str(enum_type).lstrip('struct ')
+ return str(enum_type).lstrip("struct ")
# Printer for std::Option<>
@@ -113,8 +113,8 @@ class OptionPrinter:
value_type = option_type.fields()[1].type.fields()[1].type
v_size = value_type.sizeof
data_ptr = (ptr + t_size - v_size).cast(value_type.pointer()).dereference()
- return [('Some', data_ptr)]
- return [('None', None)]
+ return [("Some", data_ptr)]
+ return [("None", None)]
def to_string(self):
return None
@@ -130,19 +130,19 @@ class TestPrinter:
type_map = [
- ('struct Au', AuPrinter),
- ('FlowFlags', BitFieldU8Printer),
- ('IntrinsicWidths', ChildPrinter),
- ('PlacementInfo', ChildPrinter),
- ('TrustedNodeAddress', TrustedNodeAddressPrinter),
- ('NodeTypeId', NodeTypeIdPrinter),
- ('Option', OptionPrinter),
+ ("struct Au", AuPrinter),
+ ("FlowFlags", BitFieldU8Printer),
+ ("IntrinsicWidths", ChildPrinter),
+ ("PlacementInfo", ChildPrinter),
+ ("TrustedNodeAddress", TrustedNodeAddressPrinter),
+ ("NodeTypeId", NodeTypeIdPrinter),
+ ("Option", OptionPrinter),
]
def lookup_servo_type(val):
val_type = str(val.type)
- for (type_name, printer) in type_map:
+ for type_name, printer in type_map:
if val_type == type_name or val_type.endswith("::" + type_name):
return printer(val)
return None
diff --git a/etc/start_servo.py b/etc/start_servo.py
index 5c241f4300a..1db08c6eb33 100644
--- a/etc/start_servo.py
+++ b/etc/start_servo.py
@@ -12,13 +12,13 @@ Created on Mon Mar 26 20:08:25 2018
@author: Pranshu Sinha, Abhay Soni, Aayushi Agrawal
The script is intended to start servo on localhost:7002
"""
+
import subprocess
def start_servo(port, resolution):
-
# Use the below command if you are running this script on windows
# cmds = 'mach.bat run --webdriver ' + port + ' --window-size ' + resolution
- cmds = './mach run --webdriver=' + port + ' --window-size ' + resolution
+ cmds = "./mach run --webdriver=" + port + " --window-size " + resolution
process = subprocess.Popen(cmds, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return process
diff --git a/etc/wpt-summarize.py b/etc/wpt-summarize.py
index 2b3fb8a0716..fe67f3b8453 100644
--- a/etc/wpt-summarize.py
+++ b/etc/wpt-summarize.py
@@ -21,7 +21,7 @@
import sys
import json
-full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
+full_search = len(sys.argv) > 3 and sys.argv[3] == "--full"
with open(sys.argv[1]) as f:
data = f.readlines()
@@ -34,13 +34,9 @@ with open(sys.argv[1]) as f:
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
- if ("action" in entry
- and entry["action"] == "test_start"
- and entry["test"] == sys.argv[2]):
+ if "action" in entry and entry["action"] == "test_start" and entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
- elif (full_search
- and "command" in entry
- and sys.argv[2] in entry["command"]):
+ elif full_search and "command" in entry and sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))
diff --git a/etc/wpt-timing.py b/etc/wpt-timing.py
index 7dc50532bf2..e023a024b92 100644
--- a/etc/wpt-timing.py
+++ b/etc/wpt-timing.py
@@ -45,9 +45,7 @@ def process_log(data):
elif entry["action"] == "test_end":
test = tests[entry["test"]]
test["end"] = int(entry["time"])
- test_results[entry["status"]] += [
- (entry["test"], test["end"] - test["start"])
- ]
+ test_results[entry["status"]] += [(entry["test"], test["end"] - test["start"])]
return test_results
@@ -73,24 +71,18 @@ print("%d tests timed out." % len(test_results["TIMEOUT"]))
longest_crash = sorted(test_results["CRASH"], key=lambda x: x[1], reverse=True)
print("Longest CRASH test took %dms (%s)" % (longest_crash[0][1], longest_crash[0][0]))
-longest_ok = sorted(
- test_results["PASS"] + test_results["OK"],
- key=lambda x: x[1], reverse=True
-)
-csv_data = [['Test path', 'Milliseconds']]
-with open('longest_ok.csv', 'w') as csv_file:
+longest_ok = sorted(test_results["PASS"] + test_results["OK"], key=lambda x: x[1], reverse=True)
+csv_data = [["Test path", "Milliseconds"]]
+with open("longest_ok.csv", "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data + longest_ok)
-longest_fail = sorted(
- test_results["ERROR"] + test_results["FAIL"],
- key=lambda x: x[1], reverse=True
-)
-with open('longest_err.csv', 'w') as csv_file:
+longest_fail = sorted(test_results["ERROR"] + test_results["FAIL"], key=lambda x: x[1], reverse=True)
+with open("longest_err.csv", "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data + longest_fail)
longest_timeout = sorted(test_results["TIMEOUT"], key=lambda x: x[1], reverse=True)
-with open('timeouts.csv', 'w') as csv_file:
+with open("timeouts.csv", "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data + longest_timeout)
diff --git a/etc/wpt_result_analyzer.py b/etc/wpt_result_analyzer.py
index 9964304eb53..0d79355e670 100644
--- a/etc/wpt_result_analyzer.py
+++ b/etc/wpt_result_analyzer.py
@@ -20,8 +20,8 @@
import os
-test_root = os.path.join('tests', 'wpt', 'tests')
-meta_root = os.path.join('tests', 'wpt', 'meta')
+test_root = os.path.join("tests", "wpt", "tests")
+meta_root = os.path.join("tests", "wpt", "meta")
test_counts = {}
meta_counts = {}
@@ -35,7 +35,7 @@ for base_dir, dir_names, files in os.walk(test_root):
continue
test_files = []
- exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
+ exts = [".html", ".htm", ".xht", ".xhtml", ".window.js", ".worker.js", ".any.js"]
for f in files:
for ext in exts:
if f.endswith(ext):
@@ -48,21 +48,21 @@ for base_dir, dir_names, files in os.walk(meta_root):
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
- if '__dir__.ini' in files:
+ if "__dir__.ini" in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
-for (test_dir, test_count) in test_counts.items():
+for test_dir, test_count in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
-print('Test counts')
-print('dir: %% failed (num tests / num failures)')
+print("Test counts")
+print("dir: %% failed (num tests / num failures)")
s = sorted(final_counts, key=lambda x: x[2] / x[1])
-for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
+for test_dir, test_count, meta_count in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
- print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
+ print("%s: %.2f%% (%d / %d)" % (test_dir, meta_count / test_count * 100, test_count, meta_count))