author     Klaus Aehlig <klaus.aehlig@huawei.com>  2023-09-14 16:28:32 +0200
committer  Klaus Aehlig <klaus.aehlig@huawei.com>  2023-09-15 14:42:47 +0200
commit     3c14ffe4648e694faadce7bd3f5e60a50413126a (patch)
tree       a80c483e7ea135999514fa3047e9fb092821b507
parent     f821e6b70c59037384ac6afb3a44517fe46953e6 (diff)
download   justbuild-3c14ffe4648e694faadce7bd3f5e60a50413126a.tar.gz
Add infrastructure for end-to-end tests using just serve
-rw-r--r--  test/end-to-end/EXPRESSIONS                 18
-rw-r--r--  test/end-to-end/RULES                      231
-rwxr-xr-x  test/end-to-end/with_serve_test_runner.py  232
3 files changed, 476 insertions, 5 deletions
diff --git a/test/end-to-end/EXPRESSIONS b/test/end-to-end/EXPRESSIONS
index 5ef8b01d..632c0b21 100644
--- a/test/end-to-end/EXPRESSIONS
+++ b/test/end-to-end/EXPRESSIONS
@@ -3,7 +3,10 @@
[ "TEST_ENV"
, "ATTEMPT"
, "name"
+ , "test type name"
, "test.sh"
+ , "data"
+ , "extra_infra"
, "keep"
, "transition"
, "TEST_COMPATIBLE_REMOTE"
@@ -66,6 +69,7 @@
, "remotestdout"
, "remotestderr"
]
+ , {"type": "var", "name": "extra_infra"}
, { "type": "foreach"
, "var": "filename"
, "range": {"type": "var", "name": "keep"}
@@ -87,6 +91,7 @@
, {"type": "var", "name": "just"}
, {"type": "var", "name": "runner"}
, {"type": "var", "name": "test.sh"}
+ , {"type": "var", "name": "data"}
, {"type": "var", "name": "attempt marker"}
]
}
@@ -132,7 +137,9 @@
, "then": "compatible"
, "else": "native"
}
- , " remote execution "
+ , " "
+ , {"type": "var", "name": "test type name"}
+ , " "
, {"type": "var", "name": "name"}
, " failed"
]
@@ -149,7 +156,9 @@
, "fail_message":
{ "type": "join"
, "$1":
- [ "shell test with remote execution"
+ [ "shell test with "
+ , {"type": "var", "name": "test type name"}
+ , " "
, {"type": "var", "name": "name"}
, " failed (Run "
, {"type": "var", "name": "ATTEMPT"}
@@ -164,10 +173,11 @@
{ "vars":
[ "TEST_ENV"
, "name"
+ , "test type name"
, "test.sh"
+ , "data"
+ , "extra_infra"
, "keep"
- , "runner"
- , "deps-fieldname"
, "transition"
, "TEST_COMPATIBLE_REMOTE"
, "TEST_REMOTE_EXECUTION"
diff --git a/test/end-to-end/RULES b/test/end-to-end/RULES
index aef5a668..13efd588 100644
--- a/test/end-to-end/RULES
+++ b/test/end-to-end/RULES
@@ -87,7 +87,10 @@
, "expression":
{ "type": "let*"
, "bindings":
- [ [ "test.sh"
+ [ ["test type name", "remote execution"]
+ , ["extra_infra", []]
+ , ["data", {"type": "empty_map"}]
+ , [ "test.sh"
, { "type": "context"
, "msg": "Expecting 'test' to specify precisely one file containing a shell script"
, "$1":
@@ -205,4 +208,230 @@
}
}
}
+, "with serve":
+ { "doc":
+ [ "Shell test, given by a test script,"
+ , "implictly assuming a remote execution and a just serve instance"
+ , "in the background."
+ ]
+ , "target_fields": ["deps", "test", "repos"]
+ , "string_fields": ["keep", "name"]
+ , "config_vars": ["ARCH", "HOST_ARCH", "RUNS_PER_TEST", "TEST_ENV"]
+ , "field_doc":
+ { "test":
+ [ "The shell script for the test, launched with sh."
+ , ""
+ , "An empty directory is created to store any temporary files needed"
+ , "by the test, and it is made available in the environment variable"
+ , "TEST_TMPDIR. The test should not assume write permissions"
+ , "outside the working directory and the TEST_TMPDIR."
+ , "For convenience, the environment variable TMPDIR is also set to TEST_TMPDIR."
+ ]
+ , "name":
+ [ "A name for the test, used in reporting, as well as for staging"
+ , "the test result tree in the runfiles"
+ ]
+ , "keep":
+ [ "List of names (relative to the test working directory) of files that"
+ , "the test might generate that should be kept as part of the output."
+ , "This might be useful for further analysis of the test"
+ ]
+ , "deps":
+ [ "Any targets that should be staged (with artifacts and runfiles) into"
+ , "the tests working directory"
+ ]
+ , "repos":
+ [ "The trees, one per entry, that the just serve instance should have"
+ , "available. The respective commits will be generated and passed to the"
+ , "test script as COMMIT_0, COMMIT_1, etc."
+ ]
+ }
+ , "config_doc":
+ { "RUNS_PER_TEST":
+ [ "The number of times the test should be run in order to detect flakyness."
+ , "If set, no test action will be taken from cache."
+ ]
+ , "TEST_ENV": ["The environment for executing the test runner."]
+ }
+ , "tainted": ["test"]
+ , "artifacts_doc":
+ [ "result: the result of this test (\"PASS\" or \"FAIL\"); useful for"
+ , " generating test reports."
+ , "stdout/stderr: Any output the invocation of the test binary produced on"
+ , " the respective file descriptor"
+ , "remotestdout/remotestderr: Any output of the remote-execution server"
+ , " implicit to that test"
+ , "servestdout/servestderr: Any output of the serve-execution server"
+ , " implicit to that test"
+ , "work: In this directory, all the files specified to \"keep\" are staged"
+ , "time-start/time-stop: The time (decimally coded) in seconds since the"
+ , " epoch when the test invocation started and ended."
+ ]
+ , "runfiles_doc":
+ [ "A tree consisting of the artifacts staged at the name of the test."
+ , "As the built-in \"install\" rule only takes the runfiles of its \"deps\""
+ , "argument, this gives an easy way of defining test suites."
+ ]
+ , "implicit":
+ { "runner": ["with_serve_test_runner.py"]
+ , "summarizer": [["@", "rules", "shell/test", "summarizer"]]
+ , "just": [["@", "src", "", "installed just"]]
+ }
+ , "imports":
+ { "test-result": "test-result"
+ , "action": "test-action"
+ , "stage": ["@", "rules", "", "stage_singleton_field"]
+ , "host transition": ["@", "rules", "transitions", "for host"]
+ }
+ , "config_transitions":
+ { "deps": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ , "repos": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ , "just": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ , "runner": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ }
+ , "expression":
+ { "type": "let*"
+ , "bindings":
+ [ ["test type name", "target-level cache serving"]
+ , ["extra_infra", ["servestdout", "servestderr"]]
+ , [ "test.sh"
+ , { "type": "context"
+ , "msg": "Expecting 'test' to specify precisely one file containing a shell script"
+ , "$1":
+ { "type": "let*"
+ , "bindings": [["fieldname", "test"], ["location", "test.sh"]]
+ , "body": {"type": "CALL_EXPRESSION", "name": "stage"}
+ }
+ }
+ ]
+ , [ "name"
+ , { "type": "assert_non_empty"
+ , "msg": "Have to provide a non-empty name for the test (e.g., for result staging)"
+ , "$1": {"type": "join", "$1": {"type": "FIELD", "name": "name"}}
+ }
+ ]
+ , ["keep", {"type": "FIELD", "name": "keep"}]
+ , ["deps-fieldname", "deps"]
+ , ["transition", {"type": "CALL_EXPRESSION", "name": "host transition"}]
+ , [ "data"
+ , { "type": "to_subdir"
+ , "subdir": "data"
+ , "$1":
+ { "type": "map_union"
+ , "$1":
+ { "type": "foreach_map"
+ , "range":
+ {"type": "enumerate", "$1": {"type": "FIELD", "name": "repos"}}
+ , "body":
+ { "type": "to_subdir"
+ , "subdir": {"type": "var", "name": "_"}
+ , "$1":
+ { "type": "DEP_ARTIFACTS"
+ , "dep": {"type": "var", "name": "$_"}
+ , "transition": {"type": "var", "name": "transition"}
+ }
+ }
+ }
+ }
+ }
+ ]
+ ]
+ , "body":
+ { "type": "if"
+ , "cond": {"type": "var", "name": "RUNS_PER_TEST"}
+ , "else": {"type": "CALL_EXPRESSION", "name": "test-result"}
+ , "then":
+ { "type": "let*"
+ , "bindings":
+ [ [ "attempts"
+ , { "type": "map_union"
+ , "$1":
+ { "type": "foreach"
+ , "var": "ATTEMPT"
+ , "range":
+ { "type": "range"
+ , "$1": {"type": "var", "name": "RUNS_PER_TEST"}
+ }
+ , "body":
+ { "type": "singleton_map"
+ , "key": {"type": "var", "name": "ATTEMPT"}
+ , "value":
+ { "type": "TREE"
+ , "$1": {"type": "CALL_EXPRESSION", "name": "action"}
+ }
+ }
+ }
+ }
+ ]
+ , [ "summarizer"
+ , { "type": "map_union"
+ , "$1":
+ { "type": "foreach"
+ , "var": "x"
+ , "range": {"type": "FIELD", "name": "summarizer"}
+ , "body":
+ { "type": "map_union"
+ , "$1":
+ { "type": "foreach"
+ , "var": "x"
+ , "range":
+ { "type": "values"
+ , "$1":
+ { "type": "DEP_ARTIFACTS"
+ , "dep": {"type": "var", "name": "x"}
+ }
+ }
+ , "body":
+ { "type": "singleton_map"
+ , "key": "summarizer"
+ , "value": {"type": "var", "name": "x"}
+ }
+ }
+ }
+ }
+ }
+ ]
+ , [ "summary"
+ , { "type": "ACTION"
+ , "inputs":
+ { "type": "map_union"
+ , "$1":
+ [ {"type": "var", "name": "attempts"}
+ , {"type": "var", "name": "summarizer"}
+ ]
+ }
+ , "outs":
+ ["stdout", "stderr", "result", "time-start", "time-stop"]
+ , "cmd": ["./summarizer"]
+ }
+ ]
+ , [ "artifacts"
+ , { "type": "map_union"
+ , "$1":
+ [ {"type": "var", "name": "summary"}
+ , { "type": "singleton_map"
+ , "key": "work"
+ , "value":
+ {"type": "TREE", "$1": {"type": "var", "name": "attempts"}}
+ }
+ ]
+ }
+ ]
+ , [ "runfiles"
+ , { "type": "singleton_map"
+ , "key": {"type": "var", "name": "name"}
+ , "value":
+ {"type": "TREE", "$1": {"type": "var", "name": "artifacts"}}
+ }
+ ]
+ ]
+ , "body":
+ { "type": "RESULT"
+ , "artifacts": {"type": "var", "name": "artifacts"}
+ , "runfiles": {"type": "var", "name": "runfiles"}
+ }
+ }
+ }
+ }
+ }
}
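
For illustration only, a hypothetical TARGETS entry using the new "with serve"
rule might look roughly as follows; the target, script, and repository names
are invented, and the exact rule reference depends on where these RULES are
rooted in the repository configuration.

{ "serve-tree-test":
  { "type": ["end-to-end", "with serve"]
  , "name": ["serve-tree-test"]
  , "test": ["serve-tree-test.sh"]
  , "repos": ["test-repo-content"]
  , "keep": ["out.log"]
  }
}

The runner would then make the generated commit of "test-repo-content"
available to serve-tree-test.sh as COMMIT_0, alongside the environment
variables REMOTE_EXECUTION_ADDRESS and SERVE.
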
diff --git a/test/end-to-end/with_serve_test_runner.py b/test/end-to-end/with_serve_test_runner.py
new file mode 100755
index 00000000..dbbd4237
--- /dev/null
+++ b/test/end-to-end/with_serve_test_runner.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python3
+# Copyright 2023 Huawei Cloud Computing Technology Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import shutil
+import subprocess
+import sys
+import time
+
+from typing import Any, Dict, List
+
+Json = Dict[str, Any]
+
+time_start: float = time.time()
+time_stop: float = 0
+result: str = "UNKNOWN"
+stderr: str = ""
+stdout: str = ""
+
+
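+# Write the current values of result, timing, stdout and stderr to the
+# corresponding output files of the test action.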
+def dump_results() -> None:
+ with open("result", "w") as f:
+ f.write("%s\n" % (result, ))
+ with open("time-start", "w") as f:
+ f.write("%d\n" % (time_start, ))
+ with open("time-stop", "w") as f:
+ f.write("%d\n" % (time_stop, ))
+ with open("stdout", "w") as f:
+ f.write("%s\n" % (stdout, ))
+ with open("stderr", "w") as f:
+ f.write("%s\n" % (stderr, ))
+
+
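+# The info files written by "just execute" and "just serve" contain the
+# interface and port the respective service listens on.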
+def get_remote_execution_address(d: Json) -> str:
+ return "%s:%d" % (d["interface"], int(d["port"]))
+
+
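+# Dump placeholder results right away, so that all output files exist even if
+# the runner is interrupted before the test finishes.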
+dump_results()
+
+TEMP_DIR = os.path.realpath("scratch")
+os.makedirs(TEMP_DIR, exist_ok=True)
+
+WORK_DIR = os.path.realpath("work")
+os.makedirs(WORK_DIR, exist_ok=True)
+
+REMOTE_DIR = os.path.realpath("remote")
+os.makedirs(REMOTE_DIR, exist_ok=True)
+REMOTE_LBR = os.path.join(REMOTE_DIR, "build-root")
+
+g_REMOTE_EXECUTION_ADDRESS: str = ""
+
+SERVE_DIR = os.path.realpath("serve")
+SERVE_LBR = os.path.join(SERVE_DIR, "build-root")
+
+remote_proc = None
+
+# start just execute as remote service
+REMOTE_INFO = os.path.join(REMOTE_DIR, "remote-info.json")
+
+if os.path.exists(REMOTE_INFO):
+ print(f"Warning: removing unexpected info file {REMOTE_INFO}")
+ os.remove(REMOTE_INFO)
+
+remote_cmd = [
+ "./bin/just",
+ "execute",
+ "--info-file",
+ REMOTE_INFO,
+ "--local-build-root",
+ REMOTE_LBR,
+ "--log-limit",
+ "6",
+ "--plain-log",
+]
+
+remotestdout = open("remotestdout", "w")
+remotestderr = open("remotestderr", "w")
+remote_proc = subprocess.Popen(
+    remote_cmd,
+    stdout=remotestdout,
+    stderr=remotestderr,
+)
+
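+# Wait for the execution service to come up; it signals readiness by writing
+# its info file.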
+while not os.path.exists(REMOTE_INFO):
+    time.sleep(1)
+
+with open(REMOTE_INFO) as f:
+    info = json.load(f)
+
+g_REMOTE_EXECUTION_ADDRESS = get_remote_execution_address(info)
+
+# start just serve service
+SERVE_INFO = os.path.join(REMOTE_DIR, "serve-info.json")
+SERVE_CONFIG_FILE = os.path.join(REMOTE_DIR, "serve.json")
+
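+# Configuration for the just serve instance: use the execution service started
+# above as execution endpoint and write an info file once the service is up.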
+serve_config = {
+ "local build root": SERVE_DIR,
+ "logging": {"limit": 6, "plain": True},
+ "execution endpoint": {"address": g_REMOTE_EXECUTION_ADDRESS},
+ "remote service": {"info file": SERVE_INFO},
+}
+
+repositories: List[str] = []
+repos_env: Dict[str, str] = {}
+
+REPOS_DIR = os.path.realpath("repos")
+os.makedirs(REPOS_DIR, exist_ok=True)
+DATA_DIR = os.path.realpath("data")
+os.makedirs(DATA_DIR, exist_ok=True)
+
+GIT_NOBODY_ENV: Dict[str, str] = {
+ "GIT_AUTHOR_DATE": "1970-01-01T00:00Z",
+ "GIT_AUTHOR_NAME": "Nobody",
+ "GIT_AUTHOR_EMAIL": "nobody@example.org",
+ "GIT_COMMITTER_DATE": "1970-01-01T00:00Z",
+ "GIT_COMMITTER_NAME": "Nobody",
+ "GIT_COMMITTER_EMAIL": "nobody@example.org",
+ "GIT_CONFIG_GLOBAL": "/dev/null",
+ "GIT_CONFIG_SYSTEM": "/dev/null",
+}
+
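+# Turn each directory staged under "data" into a git repository, committing
+# with a fixed author and committer; the resulting commit identifiers are
+# exported to the test script as COMMIT_0, COMMIT_1, etc.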
+count = 0
+for repo in os.listdir("data"):
+    target = os.path.join(REPOS_DIR, repo)
+    shutil.copytree(
+        os.path.join(DATA_DIR, repo),
+        target,
+    )
+    subprocess.run(
+        ["git", "init"],
+        cwd=target,
+        env=dict(os.environ, **GIT_NOBODY_ENV),
+        stdout=subprocess.DEVNULL,
+        stderr=subprocess.DEVNULL,
+    )
+    subprocess.run(
+        ["git", "add", "-f", "."],
+        cwd=target,
+        env=dict(os.environ, **GIT_NOBODY_ENV),
+        stdout=subprocess.DEVNULL,
+        stderr=subprocess.DEVNULL,
+    )
+    subprocess.run(
+        ["git", "commit", "-m",
+         "Content of %s" % (target,)],
+        cwd=target,
+        env=dict(os.environ, **GIT_NOBODY_ENV),
+        stdout=subprocess.DEVNULL,
+        stderr=subprocess.DEVNULL,
+    )
+    repositories.append(target)
+    repos_env["COMMIT_%d" % count] = subprocess.run(
+        ["git", "log", "-n", "1", "--format=%H"],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.DEVNULL,
+        cwd=target).stdout.decode('utf-8').strip()
+    count += 1
+
+serve_config["repositories"] = repositories
+
+with open(SERVE_CONFIG_FILE, "w") as f:
+    json.dump(serve_config, f)
+
+servestdout = open("servestdout", "w")
+servestderr = open("servestderr", "w")
+serve_proc = subprocess.Popen(
+ ["./bin/just", "serve", SERVE_CONFIG_FILE],
+ stdout=servestdout,
+ stderr=servestderr,
+)
+
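+# Wait for the serve instance to write its info file.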
+while not os.path.exists(SERVE_INFO):
+    time.sleep(1)
+
+with open(SERVE_INFO) as f:
+    serve_info = json.load(f)
+
+SERVE_ADDRESS = get_remote_execution_address(serve_info)
+
+# run the actual test
+
+ENV = dict(
+    os.environ,
+    TEST_TMPDIR=TEMP_DIR,
+    TMPDIR=TEMP_DIR,
+    REMOTE_EXECUTION_ADDRESS=g_REMOTE_EXECUTION_ADDRESS,
+    SERVE=SERVE_ADDRESS,
+    **repos_env
+)
+
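+# The services started here run in native mode and without TLS, so drop any
+# related settings inherited from the outer test environment.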
+if "COMPATIBLE" in ENV:
+ del ENV["COMPATIBLE"]
+
+for k in ["TLS_CA_CERT", "TLS_CLIENT_CERT", "TLS_CLIENT_KEY"]:
+    if k in ENV:
+        del ENV[k]
+
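+# Run the test script from the prepared working directory, recording
+# wall-clock time and capturing its output.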
+time_start = time.time()
+ret = subprocess.run(["sh", "../test.sh"],
+ cwd=WORK_DIR,
+ env=ENV,
+ capture_output=True)
+
+time_stop = time.time()
+result = "PASS" if ret.returncode == 0 else "FAIL"
+stdout = ret.stdout.decode("utf-8")
+stderr = ret.stderr.decode("utf-8")
+
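+# Shut down the services started for this test.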
+assert remote_proc
+remote_proc.terminate()
+rout, rerr = remote_proc.communicate()
+
+assert serve_proc
+serve_proc.terminate()
+sout, serr = serve_proc.communicate()
+
+dump_results()
+
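+# Make sure all files requested via "keep" (passed as arguments) exist, so
+# they can be collected from the working directory even if the test did not
+# create them.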
+for f in sys.argv[2:]:
+    keep_file = os.path.join(WORK_DIR, f)
+    if not os.path.exists(keep_file):
+        open(keep_file, "a").close()
+
+if result != "PASS": exit(1)