author     Klaus Aehlig <klaus.aehlig@huawei.com>     2023-02-24 15:09:49 +0100
committer  Sascha Roloff <sascha.roloff@huawei.com>   2023-03-06 17:17:21 +0100
commit     27755862921509cf3d64d730519fad3d0710c0b7 (patch)
tree       d3d60687c1a2bc7e8d4d39d9b7f280974ef76603 /test
parent     00adcbad4162527bd750aba69306d2d7c36ed0af (diff)
download   justbuild-27755862921509cf3d64d730519fad3d0710c0b7.tar.gz
Add rule for shell tests with remote execution
... similar to ["@", "rules", "shell/test", "script"], but implicitly starting a remote execution in the background, passing the information about that endpoint to the test via an environment variable.
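For illustration, a target using this rule might be declared as follows. This is only a sketch: the target name, script name, kept file, and dependency are made up; the field names come from the rule definition added below, and the rule is assumed to be referenced from a TARGETS file in the same module.

{ "remote-build":
  { "type": "with remote"
  , "name": ["remote-build"]
  , "test": ["remote-build.sh"]
  , "keep": ["out.json"]
  , "deps": [["", "tool-under-test"]]
  }
}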
Diffstat (limited to 'test')
-rw-r--r--   test/end-to-end/EXPRESSIONS                  190
-rw-r--r--   test/end-to-end/RULES                        200
-rwxr-xr-x   test/end-to-end/with_remote_test_runner.py   124
3 files changed, 514 insertions(+), 0 deletions(-)
diff --git a/test/end-to-end/EXPRESSIONS b/test/end-to-end/EXPRESSIONS
new file mode 100644
index 00000000..20b9cb6a
--- /dev/null
+++ b/test/end-to-end/EXPRESSIONS
@@ -0,0 +1,190 @@
+{ "test-action":
+ { "vars":
+ [ "TEST_ENV"
+ , "ATTEMPT"
+ , "name"
+ , "test.sh"
+ , "keep"
+ , "transition"
+ , "TEST_COMPATIBLE_REMOTE"
+ ]
+ , "imports":
+ { "artifacts_list": ["@", "rules", "", "field_artifacts_list"]
+ , "runfiles_list": ["@", "rules", "", "field_runfiles_list"]
+ , "stage": ["@", "rules", "", "stage_singleton_field"]
+ }
+ , "expression":
+ { "type": "let*"
+ , "bindings":
+ [ ["fieldname", "runner"]
+ , ["location", "runner"]
+ , ["runner", {"type": "CALL_EXPRESSION", "name": "stage"}]
+ , ["fieldname", "just"]
+ , ["location", "just"]
+ , ["just", {"type": "CALL_EXPRESSION", "name": "stage"}]
+ , ["fieldname", "deps"]
+ , [ "deps"
+ , { "type": "TREE"
+ , "$1":
+ { "type": "disjoint_map_union"
+ , "msg": "Field \"deps\" has to stage in a conflict free way"
+ , "$1":
+ { "type": "++"
+ , "$1":
+ [ {"type": "CALL_EXPRESSION", "name": "runfiles_list"}
+ , {"type": "CALL_EXPRESSION", "name": "artifacts_list"}
+ ]
+ }
+ }
+ }
+ ]
+ , [ "attempt marker"
+ , { "type": "if"
+ , "cond":
+ { "type": "=="
+ , "$1": {"type": "var", "name": "ATTEMPT"}
+ , "$2": null
+ }
+ , "then": {"type": "empty_map"}
+ , "else":
+ { "type": "singleton_map"
+ , "key": "ATTEMPT"
+ , "value":
+ {"type": "BLOB", "data": {"type": "var", "name": "ATTEMPT"}}
+ }
+ }
+ ]
+ , [ "outs"
+ , { "type": "++"
+ , "$1":
+ [ [ "result"
+ , "stdout"
+ , "stderr"
+ , "time-start"
+ , "time-stop"
+ , "remotestdout"
+ , "remotestderr"
+ ]
+ , { "type": "foreach"
+ , "var": "filename"
+ , "range": {"type": "var", "name": "keep"}
+ , "body":
+ { "type": "join"
+ , "$1": ["work/", {"type": "var", "name": "filename"}]
+ }
+ }
+ ]
+ }
+ ]
+ , [ "inputs"
+ , { "type": "map_union"
+ , "$1":
+ [ { "type": "singleton_map"
+ , "key": "work"
+ , "value": {"type": "var", "name": "deps"}
+ }
+ , {"type": "var", "name": "just"}
+ , {"type": "var", "name": "runner"}
+ , {"type": "var", "name": "test.sh"}
+ , {"type": "var", "name": "attempt marker"}
+ ]
+ }
+ ]
+ , [ "cmd"
+ , { "type": "++"
+ , "$1":
+ [ [ "./runner"
+ , { "type": "if"
+ , "cond": {"type": "var", "name": "TEST_COMPATIBLE_REMOTE"}
+ , "then": "true"
+ , "else": "false"
+ }
+ ]
+ , {"type": "var", "name": "keep"}
+ ]
+ }
+ ]
+ , [ "test_env"
+ , {"type": "var", "name": "TEST_ENV", "default": {"type": "empty_map"}}
+ ]
+ ]
+ , "body":
+ { "type": "if"
+ , "cond":
+ {"type": "==", "$1": {"type": "var", "name": "ATTEMPT"}, "$2": null}
+ , "then":
+ { "type": "ACTION"
+ , "outs": {"type": "var", "name": "outs"}
+ , "inputs": {"type": "var", "name": "inputs"}
+ , "cmd": {"type": "var", "name": "cmd"}
+ , "env": {"type": "var", "name": "test_env"}
+ , "may_fail": ["test"]
+ , "fail_message":
+ { "type": "join"
+ , "$1":
+ [ "shell test with "
+ , { "type": "if"
+ , "cond": {"type": "var", "name": "TEST_COMPATIBLE_REMOTE"}
+ , "then": "compatible"
+ , "else": "native"
+ }
+ , " remote execution "
+ , {"type": "var", "name": "name"}
+ , " failed"
+ ]
+ }
+ }
+ , "else":
+ { "type": "ACTION"
+ , "outs": {"type": "var", "name": "outs"}
+ , "inputs": {"type": "var", "name": "inputs"}
+ , "cmd": {"type": "var", "name": "cmd"}
+ , "env": {"type": "var", "name": "test_env"}
+ , "may_fail": ["test"]
+ , "no_cache": ["test"]
+ , "fail_message":
+ { "type": "join"
+ , "$1":
+ [ "shell test with remote execution"
+ , {"type": "var", "name": "name"}
+ , " failed (Run "
+ , {"type": "var", "name": "ATTEMPT"}
+ , ")"
+ ]
+ }
+ }
+ }
+ }
+ }
+, "test-result":
+ { "vars":
+ [ "TEST_ENV"
+ , "name"
+ , "test.sh"
+ , "keep"
+ , "runner"
+ , "deps-fieldname"
+ , "transition"
+ , "TEST_COMPATIBLE_REMOTE"
+ ]
+ , "imports": {"action": "test-action"}
+ , "expression":
+ { "type": "let*"
+ , "bindings":
+ [ ["test-results", {"type": "CALL_EXPRESSION", "name": "action"}]
+ , [ "runfiles"
+ , { "type": "singleton_map"
+ , "key": {"type": "var", "name": "name"}
+ , "value":
+ {"type": "TREE", "$1": {"type": "var", "name": "test-results"}}
+ }
+ ]
+ ]
+ , "body":
+ { "type": "RESULT"
+ , "artifacts": {"type": "var", "name": "test-results"}
+ , "runfiles": {"type": "var", "name": "runfiles"}
+ }
+ }
+ }
+}
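To illustrate the "cmd" binding defined above (with hypothetical values): if "keep" is ["out.json"] and TEST_COMPATIBLE_REMOTE is unset, the command vector of the test action evaluates to

["./runner", "false", "out.json"]

i.e., the runner receives the compatibility flag as its first argument, followed by the names (relative to the work directory) of the files to keep.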
diff --git a/test/end-to-end/RULES b/test/end-to-end/RULES
new file mode 100644
index 00000000..6c55e8cf
--- /dev/null
+++ b/test/end-to-end/RULES
@@ -0,0 +1,200 @@
+{ "with remote":
+ { "doc":
+ [ "Shell test, given by a test script,"
+ , "implictly assuming a remote execution in the background."
+ ]
+ , "target_fields": ["deps", "test"]
+ , "string_fields": ["keep", "name"]
+ , "config_vars":
+ [ "ARCH"
+ , "HOST_ARCH"
+ , "RUNS_PER_TEST"
+ , "TEST_ENV"
+ , "TEST_COMPATIBLE_REMOTE"
+ ]
+ , "field_doc":
+ { "test":
+ [ "The shell script for the test, launched with sh."
+ , ""
+ , "An empty directory is created to store any temporary files needed"
+ , "by the test, and it is made available in the environment variable"
+ , "TEST_TMPDIR. The test should not assume write permissions"
+ , "outside the working directory and the TEST_TMPDIR."
+ , "For convenience, the environment variable TMPDIR is also set to TEST_TMPDIR."
+ ]
+ , "name":
+ [ "A name for the test, used in reporting, as well as for staging"
+ , "the test result tree in the runfiles"
+ ]
+ , "keep":
+ [ "List of names (relative to the test working directory) of files that"
+ , "the test might generate that should be kept as part of the output."
+ , "This might be useful for further analysis of the test"
+ ]
+ , "deps":
+ [ "Any targets that should be staged (with artifacts and runfiles) into"
+ , "the tests working directory"
+ ]
+ }
+ , "config_doc":
+ { "RUNS_PER_TEST":
+ [ "The number of times the test should be run in order to detect flakyness."
+ , "If set, no test action will be taken from cache."
+ ]
+ , "TEST_ENV": ["The environment for executing the test runner."]
+ , "TEST_COMPATIBLE_REMOTE":
+ ["If true, run the remote execution in compatible mode."]
+ }
+ , "tainted": ["test"]
+ , "artifacts_doc":
+ [ "result: the result of this test (\"PASS\" or \"FAIL\"); useful for"
+ , " generating test reports."
+ , "stdout/stderr: Any output the invocation of the test binary produced on"
+ , " the respective file descriptor"
+ , "work: In this directory, all the files specified to \"keep\" are staged"
+ , "time-start/time-stop: The time (decimally coded) in seconds since the"
+ , " epoch when the test invocation started and ended."
+ ]
+ , "runfiles_doc":
+ [ "A tree consisting of the artifacts staged at the name of the test."
+ , "As the built-in \"install\" rule only takes the runfiles of its \"deps\""
+ , "argument, this gives an easy way of defining test suites."
+ ]
+ , "implicit":
+ { "runner": ["with_remote_test_runner.py"]
+ , "summarizer": [["@", "rules", "shell/test", "test_summary.py"]]
+ , "just": [["", "just"]]
+ }
+ , "imports":
+ { "test-result": "test-result"
+ , "action": "test-action"
+ , "stage": ["@", "rules", "", "stage_singleton_field"]
+ , "host transition": ["@", "rules", "transitions", "for host"]
+ }
+ , "config_transitions":
+ { "deps": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ , "just": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ , "runner": [{"type": "CALL_EXPRESSION", "name": "host transition"}]
+ }
+ , "expression":
+ { "type": "let*"
+ , "bindings":
+ [ [ "test.sh"
+ , { "type": "context"
+ , "msg": "Expecting 'test' to specify precisely one file containing a shell script"
+ , "$1":
+ { "type": "let*"
+ , "bindings": [["fieldname", "test"], ["location", "test.sh"]]
+ , "body": {"type": "CALL_EXPRESSION", "name": "stage"}
+ }
+ }
+ ]
+ , [ "name"
+ , { "type": "assert_non_empty"
+ , "msg": "Have to provide a non-empty name for the test (e.g., for result staging)"
+ , "$1": {"type": "join", "$1": {"type": "FIELD", "name": "name"}}
+ }
+ ]
+ , ["keep", {"type": "FIELD", "name": "keep"}]
+ , ["deps-fieldname", "deps"]
+ , ["transition", {"type": "CALL_EXPRESSION", "name": "host transition"}]
+ ]
+ , "body":
+ { "type": "if"
+ , "cond": {"type": "var", "name": "RUNS_PER_TEST"}
+ , "else": {"type": "CALL_EXPRESSION", "name": "test-result"}
+ , "then":
+ { "type": "let*"
+ , "bindings":
+ [ [ "attempts"
+ , { "type": "map_union"
+ , "$1":
+ { "type": "foreach"
+ , "var": "ATTEMPT"
+ , "range":
+ { "type": "range"
+ , "$1": {"type": "var", "name": "RUNS_PER_TEST"}
+ }
+ , "body":
+ { "type": "singleton_map"
+ , "key": {"type": "var", "name": "ATTEMPT"}
+ , "value":
+ { "type": "TREE"
+ , "$1": {"type": "CALL_EXPRESSION", "name": "action"}
+ }
+ }
+ }
+ }
+ ]
+ , [ "summarizer"
+ , { "type": "map_union"
+ , "$1":
+ { "type": "foreach"
+ , "var": "x"
+ , "range": {"type": "FIELD", "name": "summarizer"}
+ , "body":
+ { "type": "map_union"
+ , "$1":
+ { "type": "foreach"
+ , "var": "x"
+ , "range":
+ { "type": "values"
+ , "$1":
+ { "type": "DEP_ARTIFACTS"
+ , "dep": {"type": "var", "name": "x"}
+ }
+ }
+ , "body":
+ { "type": "singleton_map"
+ , "key": "summarizer"
+ , "value": {"type": "var", "name": "x"}
+ }
+ }
+ }
+ }
+ }
+ ]
+ , [ "summary"
+ , { "type": "ACTION"
+ , "inputs":
+ { "type": "map_union"
+ , "$1":
+ [ {"type": "var", "name": "attempts"}
+ , {"type": "var", "name": "summarizer"}
+ ]
+ }
+ , "outs":
+ ["stdout", "stderr", "result", "time-start", "time-stop"]
+ , "cmd": ["./summarizer"]
+ }
+ ]
+ , [ "artifacts"
+ , { "type": "map_union"
+ , "$1":
+ [ {"type": "var", "name": "summary"}
+ , { "type": "singleton_map"
+ , "key": "work"
+ , "value":
+ {"type": "TREE", "$1": {"type": "var", "name": "attempts"}}
+ }
+ ]
+ }
+ ]
+ , [ "runfiles"
+ , { "type": "singleton_map"
+ , "key": {"type": "var", "name": "name"}
+ , "value":
+ {"type": "TREE", "$1": {"type": "var", "name": "artifacts"}}
+ }
+ ]
+ ]
+ , "body":
+ { "type": "RESULT"
+ , "artifacts": {"type": "var", "name": "artifacts"}
+ , "runfiles": {"type": "var", "name": "runfiles"}
+ }
+ }
+ }
+ }
+ }
+}
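The config variables documented above would be exercised by evaluating the test targets against a configuration overlay; a sketch with example values (to be supplied via just's usual configuration mechanism):

{ "RUNS_PER_TEST": 3
, "TEST_COMPATIBLE_REMOTE": true
, "TEST_ENV": {"PATH": "/bin:/usr/bin"}
}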
diff --git a/test/end-to-end/with_remote_test_runner.py b/test/end-to-end/with_remote_test_runner.py
new file mode 100755
index 00000000..69349b25
--- /dev/null
+++ b/test/end-to-end/with_remote_test_runner.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Copyright 2023 Huawei Cloud Computing Technology Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import sys
+import subprocess
+import time
+
+time_start = time.time()
+time_stop = 0
+result = "UNKNOWN"
+stderr = ""
+stdout = ""
+remotestdout = ""
+remotestderr = ""
+
+
+def dump_results():
+ with open("result", "w") as f:
+ f.write("%s\n" % (result, ))
+ with open("time-start", "w") as f:
+ f.write("%d\n" % (time_start, ))
+ with open("time-stop", "w") as f:
+ f.write("%d\n" % (time_stop, ))
+ with open("stdout", "w") as f:
+ f.write("%s\n" % (stdout, ))
+ with open("stderr", "w") as f:
+ f.write("%s\n" % (stderr, ))
+ with open("remotestdout", "w") as f:
+ f.write("%s\n" % (remotestdout, ))
+ with open("remotestderr", "w") as f:
+ f.write("%s\n" % (remotestderr, ))
+
+
+dump_results()
+
+TEMP_DIR = os.path.realpath("scratch")
+os.makedirs(TEMP_DIR, exist_ok=True)
+
+WORK_DIR = os.path.realpath("work")
+os.makedirs(WORK_DIR, exist_ok=True)
+
+REMOTE_DIR = os.path.realpath("remote")
+os.makedirs(REMOTE_DIR, exist_ok=True)
+REMOTE_INFO = os.path.join(REMOTE_DIR, "info.json")
+REMOTE_LBR = os.path.join(REMOTE_DIR, "build-root")
+
+remote_cmd = [
+ "./just", "execute",
+ "--info-file", REMOTE_INFO,
+ "--local-build-root", REMOTE_LBR,
+ "--log-limit", "5", "--plain-log",
+]
+
+compatible = json.loads(sys.argv[1])
+if compatible:
+    remote_cmd.append("--compatible")
+
+
+remote_proc = subprocess.Popen(
+    remote_cmd,
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE,
+)
+
+while not os.path.exists(REMOTE_INFO):
+    time.sleep(1)
+
+with open(REMOTE_INFO) as f:
+    info = json.load(f)
+
+REMOTE_EXECUTION_ADDRESS = "%s:%d" % (info["interface"], info["port"])
+
+ENV = dict(os.environ,
+           TEST_TMPDIR=TEMP_DIR, TMPDIR=TEMP_DIR,
+           REMOTE_EXECUTION_ADDRESS=REMOTE_EXECUTION_ADDRESS)
+
+if compatible:
+ ENV["COMPATIBLE"] = "YES"
+elif "COMPATIBLE" in ENV:
+ del ENV["COMPATIBLE"]
+
+for k in ["TLS_CA_CERT", "TLS_CLIENT_CERT", "TLS_CLIENT_KEY"]:
+    if k in ENV:
+        del ENV[k]
+
+
+time_start = time.time()
+ret = subprocess.run(["sh", "../test.sh"],
+                     cwd=WORK_DIR,
+                     env=ENV,
+                     capture_output=True)
+
+time_stop = time.time()
+result = "PASS" if ret.returncode == 0 else "FAIL"
+stdout = ret.stdout.decode("utf-8")
+stderr = ret.stderr.decode("utf-8")
+remote_proc.terminate()
+rout, rerr = remote_proc.communicate()
+remotestdout = rout.decode("utf-8")
+remotestderr = rerr.decode("utf-8")
+
+dump_results()
+
+for f in sys.argv[2:]:
+    keep_file = os.path.join(WORK_DIR, f)
+    if not os.path.exists(keep_file):
+        open(keep_file, "a").close()
+
+
+if result != "PASS": exit(1)
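As each test stages its result tree as runfiles under the test's name (see the runfiles_doc in RULES above), suites of such tests can be collected with the built-in "install" rule; a sketch, with hypothetical test target names:

{ "TESTS":
  { "type": "install"
  , "tainted": ["test"]
  , "deps": ["remote-build", "another-remote-test"]
  }
}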