aboutsummaryrefslogtreecommitdiffstats
path: root/tests/wpt/update/fetchlogs.py
diff options
context:
space:
mode:
authorMartin Robinson <mrobinson@igalia.com>2023-04-10 13:56:47 +0200
committerMartin Robinson <mrobinson@igalia.com>2023-04-10 23:04:17 +0200
commitbc3abf995313ece462233c914f4aa61b76566e66 (patch)
tree48d83efc5f52ab9118dc09dd229ffee4ade9ceb3 /tests/wpt/update/fetchlogs.py
parentd579bd91b8e82606907dd789f13f6ecaee6a9b18 (diff)
downloadservo-bc3abf995313ece462233c914f4aa61b76566e66.tar.gz
servo-bc3abf995313ece462233c914f4aa61b76566e66.zip
Remove more Taskcluster and Treeherder integration
Servo no longer uses Taskcluster and Treeherder, so this change removes script references to those services and support files.
Diffstat (limited to 'tests/wpt/update/fetchlogs.py')
-rw-r--r-- tests/wpt/update/fetchlogs.py | 99
1 file changed, 0 insertions(+), 99 deletions(-)
diff --git a/tests/wpt/update/fetchlogs.py b/tests/wpt/update/fetchlogs.py
deleted file mode 100644
index 385f2c54174..00000000000
--- a/tests/wpt/update/fetchlogs.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at https://mozilla.org/MPL/2.0/.
-
-import argparse
-import cStringIO
-import gzip
-import json
-import os
-import requests
-import six.moves.urllib as urllib
-
-treeherder_base = "https://treeherder.mozilla.org/"
-
-"""Simple script for downloading structured logs from treeherder.
-
-For the moment this is specialised to work with web-platform-tests
-logs; in due course it should move somewhere generic and get hooked
-up to mach or similar"""
-
-# Interpretation of the "job" list from
-# https://github.com/mozilla/treeherder-service/blob/master/treeherder/webapp/api/utils.py#L18
-
-def create_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument("branch", action="store",
- help="Branch on which jobs ran")
- parser.add_argument("commit",
- action="store",
- help="Commit hash for push")
-
- return parser
-
-def download(url, prefix, dest, force_suffix=True):
- if dest is None:
- dest = "."
-
- if prefix and not force_suffix:
- name = os.path.join(dest, prefix + ".log")
- else:
- name = None
- counter = 0
-
- while not name or os.path.exists(name):
- counter += 1
- sep = "" if not prefix else "-"
- name = os.path.join(dest, prefix + sep + str(counter) + ".log")
-
- with open(name, "wb") as f:
- resp = requests.get(url, stream=True)
- for chunk in resp.iter_content(1024):
- f.write(chunk)
-
-def get_blobber_url(branch, job):
- job_id = job["id"]
- resp = requests.get(urllib.parse.urljoin(treeherder_base,
- "/api/project/%s/artifact/?job_id=%i&name=Job%%20Info" % (branch,
- job_id)))
- job_data = resp.json()
-
- if job_data:
- assert len(job_data) == 1
- job_data = job_data[0]
- try:
- details = job_data["blob"]["job_details"]
- for item in details:
- if item["value"] == "wpt_raw.log":
- return item["url"]
- except:
- return None
-
-
-def get_structured_logs(branch, commit, dest=None):
- resp = requests.get(urllib.parse.urljoin(treeherder_base, "/api/project/%s/resultset/?revision=%s" % (branch, commit)))
-
- revision_data = resp.json()
-
- result_set = revision_data["results"][0]["id"]
-
- resp = requests.get(urllib.parse.urljoin(treeherder_base, "/api/project/%s/jobs/?result_set_id=%s&count=2000&exclusion_profile=false" % (branch, result_set)))
-
- job_data = resp.json()
-
- for result in job_data["results"]:
- job_type_name = result["job_type_name"]
- if job_type_name.startswith("W3C Web Platform"):
- url = get_blobber_url(branch, result)
- if url:
- prefix = result["platform"] # platform
- download(url, prefix, None)
-
-def main():
- parser = create_parser()
- args = parser.parse_args()
-
- get_structured_logs(args.branch, args.commit)
-
-if __name__ == "__main__":
- main()