[llvm] Change update_xxx_checks to continue on error when processing multiple inputs (PR #137728)
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 29 11:41:35 PDT 2025
https://github.com/MatzeB updated https://github.com/llvm/llvm-project/pull/137728
>From 8d0800f40ec15cc079f2f98ea54b6751e62d1b91 Mon Sep 17 00:00:00 2001
From: Matthias Braun <matze at braunis.de>
Date: Mon, 28 Apr 2025 16:18:08 -0700
Subject: [PATCH 1/2] Change update_xxx_checks to continue on error when
processing multiple inputs
---
llvm/utils/update_analyze_test_checks.py | 316 ++++++-------
llvm/utils/update_cc_test_checks.py | 504 ++++++++++----------
llvm/utils/update_llc_test_checks.py | 462 ++++++++++---------
llvm/utils/update_mc_test_checks.py | 563 ++++++++++++-----------
llvm/utils/update_mca_test_checks.py | 11 +-
llvm/utils/update_mir_test_checks.py | 13 +-
llvm/utils/update_test_checks.py | 527 ++++++++++-----------
7 files changed, 1223 insertions(+), 1173 deletions(-)
diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py
index d356ebead0d81..6ecf51b88d819 100755
--- a/llvm/utils/update_analyze_test_checks.py
+++ b/llvm/utils/update_analyze_test_checks.py
@@ -31,6 +31,8 @@
from __future__ import print_function
+from sys import stderr
+from traceback import print_exc
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import sys
@@ -39,6 +41,158 @@
from UpdateTestChecks import common
+def update_test(opt_basename: str, ti: common.TestInfo):
+ triple_in_ir = None
+ for l in ti.input_lines:
+ m = common.TRIPLE_IR_RE.match(l)
+ if m:
+ triple_in_ir = m.groups()[0]
+ break
+
+ prefix_list = []
+ for l in ti.run_lines:
+ if "|" not in l:
+ common.warn("Skipping unparsable RUN line: " + l)
+ continue
+
+ (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split("|", 1)])
+ common.verify_filecheck_prefixes(filecheck_cmd)
+
+ if not tool_cmd.startswith(opt_basename + " "):
+ common.warn("Skipping non-%s RUN line: %s" % (opt_basename, l))
+ continue
+
+ if not filecheck_cmd.startswith("FileCheck "):
+ common.warn("Skipping non-FileChecked RUN line: " + l)
+ continue
+
+ tool_cmd_args = tool_cmd[len(opt_basename) :].strip()
+ tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
+ check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+ # FIXME: We should use multiple check prefixes to common check lines. For
+ # now, we just ignore all but the last.
+ prefix_list.append((check_prefixes, tool_cmd_args))
+
+ ginfo = common.make_analyze_generalizer(version=1)
+ builder = common.FunctionTestBuilder(
+ run_list=prefix_list,
+ flags=type(
+ "",
+ (object,),
+ {
+ "verbose": ti.args.verbose,
+ "filters": ti.args.filters,
+ "function_signature": False,
+ "check_attributes": False,
+ "replace_value_regex": [],
+ },
+ ),
+ scrubber_args=[],
+ path=ti.path,
+ ginfo=ginfo,
+ )
+
+ for prefixes, opt_args in prefix_list:
+ common.debug("Extracted opt cmd:", opt_basename, opt_args, file=sys.stderr)
+ common.debug("Extracted FileCheck prefixes:", str(prefixes), file=sys.stderr)
+
+ raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path)
+
+ if re.search(r"Printing analysis ", raw_tool_outputs) is not None:
+ # Split analysis outputs by "Printing analysis " declarations.
+ for raw_tool_output in re.split(r"Printing analysis ", raw_tool_outputs):
+ builder.process_run_line(
+ common.ANALYZE_FUNCTION_RE,
+ common.scrub_body,
+ raw_tool_output,
+ prefixes,
+ )
+ elif (
+ re.search(r"(LV|LDist): Checking a loop in ", raw_tool_outputs) is not None
+ ):
+ for raw_tool_output in re.split(
+ r"(LV|LDist): Checking a loop in ", raw_tool_outputs
+ ):
+ builder.process_run_line(
+ common.LOOP_PASS_DEBUG_RE,
+ common.scrub_body,
+ raw_tool_output,
+ prefixes,
+ )
+ else:
+ common.warn("Don't know how to deal with this output")
+ continue
+
+ builder.processed_prefixes(prefixes)
+
+ func_dict = builder.finish_and_get_func_dict()
+ is_in_function = False
+ is_in_function_start = False
+ prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
+ common.debug("Rewriting FileCheck prefixes:", str(prefix_set), file=sys.stderr)
+ output_lines = []
+
+ generated_prefixes = []
+ for input_info in ti.iterlines(output_lines):
+ input_line = input_info.line
+ args = input_info.args
+ if is_in_function_start:
+ if input_line == "":
+ continue
+ if input_line.lstrip().startswith(";"):
+ m = common.CHECK_RE.match(input_line)
+ if not m or m.group(1) not in prefix_set:
+ output_lines.append(input_line)
+ continue
+
+ # Print out the various check lines here.
+ generated_prefixes.extend(
+ common.add_analyze_checks(
+ output_lines,
+ ";",
+ prefix_list,
+ func_dict,
+ func_name,
+ ginfo,
+ is_filtered=builder.is_filtered(),
+ )
+ )
+ is_in_function_start = False
+
+ if is_in_function:
+ if common.should_add_line_to_output(input_line, prefix_set):
+ # This input line of the function body will go as-is into the output.
+ output_lines.append(input_line)
+ else:
+ continue
+ if input_line.strip() == "}":
+ is_in_function = False
+ continue
+
+ # If it's outside a function, it just gets copied to the output.
+ output_lines.append(input_line)
+
+ m = common.IR_FUNCTION_RE.match(input_line)
+ if not m:
+ continue
+ func_name = m.group(1)
+ if ti.args.function is not None and func_name != ti.args.function:
+ # When filtering on a specific function, skip all others.
+ continue
+ is_in_function = is_in_function_start = True
+
+ if ti.args.gen_unused_prefix_body:
+ output_lines.extend(
+ ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
+ )
+
+ common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+
+ with open(ti.path, "wb") as f:
+ f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
def main():
from argparse import RawTextHelpFormatter
@@ -61,163 +215,17 @@ def main():
common.error("Unexpected opt name: " + opt_basename)
sys.exit(1)
+ returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
- triple_in_ir = None
- for l in ti.input_lines:
- m = common.TRIPLE_IR_RE.match(l)
- if m:
- triple_in_ir = m.groups()[0]
- break
-
- prefix_list = []
- for l in ti.run_lines:
- if "|" not in l:
- common.warn("Skipping unparsable RUN line: " + l)
- continue
-
- (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split("|", 1)])
- common.verify_filecheck_prefixes(filecheck_cmd)
-
- if not tool_cmd.startswith(opt_basename + " "):
- common.warn("WSkipping non-%s RUN line: %s" % (opt_basename, l))
- continue
-
- if not filecheck_cmd.startswith("FileCheck "):
- common.warn("Skipping non-FileChecked RUN line: " + l)
- continue
-
- tool_cmd_args = tool_cmd[len(opt_basename) :].strip()
- tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
- check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
- # FIXME: We should use multiple check prefixes to common check lines. For
- # now, we just ignore all but the last.
- prefix_list.append((check_prefixes, tool_cmd_args))
-
- ginfo = common.make_analyze_generalizer(version=1)
- builder = common.FunctionTestBuilder(
- run_list=prefix_list,
- flags=type(
- "",
- (object,),
- {
- "verbose": ti.args.verbose,
- "filters": ti.args.filters,
- "function_signature": False,
- "check_attributes": False,
- "replace_value_regex": [],
- },
- ),
- scrubber_args=[],
- path=ti.path,
- ginfo=ginfo,
- )
-
- for prefixes, opt_args in prefix_list:
- common.debug("Extracted opt cmd:", opt_basename, opt_args, file=sys.stderr)
- common.debug(
- "Extracted FileCheck prefixes:", str(prefixes), file=sys.stderr
- )
-
- raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path)
-
- if re.search(r"Printing analysis ", raw_tool_outputs) is not None:
- # Split analysis outputs by "Printing analysis " declarations.
- for raw_tool_output in re.split(
- r"Printing analysis ", raw_tool_outputs
- ):
- builder.process_run_line(
- common.ANALYZE_FUNCTION_RE,
- common.scrub_body,
- raw_tool_output,
- prefixes,
- )
- elif (
- re.search(r"(LV|LDist): Checking a loop in ", raw_tool_outputs)
- is not None
- ):
- for raw_tool_output in re.split(
- r"(LV|LDist): Checking a loop in ", raw_tool_outputs
- ):
- builder.process_run_line(
- common.LOOP_PASS_DEBUG_RE,
- common.scrub_body,
- raw_tool_output,
- prefixes,
- )
- else:
- common.warn("Don't know how to deal with this output")
- continue
-
- builder.processed_prefixes(prefixes)
-
- func_dict = builder.finish_and_get_func_dict()
- is_in_function = False
- is_in_function_start = False
- prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
- common.debug("Rewriting FileCheck prefixes:", str(prefix_set), file=sys.stderr)
- output_lines = []
-
- generated_prefixes = []
- for input_info in ti.iterlines(output_lines):
- input_line = input_info.line
- args = input_info.args
- if is_in_function_start:
- if input_line == "":
- continue
- if input_line.lstrip().startswith(";"):
- m = common.CHECK_RE.match(input_line)
- if not m or m.group(1) not in prefix_set:
- output_lines.append(input_line)
- continue
-
- # Print out the various check lines here.
- generated_prefixes.extend(
- common.add_analyze_checks(
- output_lines,
- ";",
- prefix_list,
- func_dict,
- func_name,
- ginfo,
- is_filtered=builder.is_filtered(),
- )
- )
- is_in_function_start = False
-
- if is_in_function:
- if common.should_add_line_to_output(input_line, prefix_set):
- # This input line of the function body will go as-is into the output.
- output_lines.append(input_line)
- else:
- continue
- if input_line.strip() == "}":
- is_in_function = False
- continue
-
- # If it's outside a function, it just gets copied to the output.
- output_lines.append(input_line)
-
- m = common.IR_FUNCTION_RE.match(input_line)
- if not m:
- continue
- func_name = m.group(1)
- if ti.args.function is not None and func_name != ti.args.function:
- # When filtering on a specific function, skip all others.
- continue
- is_in_function = is_in_function_start = True
-
- if ti.args.gen_unused_prefix_body:
- output_lines.extend(
- ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
- )
-
- common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
-
- with open(ti.path, "wb") as f:
- f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+ try:
+ update_test(opt_basename, ti)
+ except Exception:
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+ return returncode
if __name__ == "__main__":
diff --git a/llvm/utils/update_cc_test_checks.py b/llvm/utils/update_cc_test_checks.py
index 7529d480b35dd..4102ee4ecbd22 100755
--- a/llvm/utils/update_cc_test_checks.py
+++ b/llvm/utils/update_cc_test_checks.py
@@ -14,6 +14,8 @@
from __future__ import print_function
+from sys import stderr
+from traceback import print_exc
import argparse
import collections
import json
@@ -302,257 +304,142 @@ def exec_run_line(exe):
sys.exit(3)
-def main():
- initial_args, parser = config()
- script_name = os.path.basename(__file__)
-
- for ti in common.itertests(
- initial_args.tests,
- parser,
- "utils/" + script_name,
- comment_prefix="//",
- argparse_callback=infer_dependent_args,
- ):
- # Build a list of filechecked and non-filechecked RUN lines.
- run_list = []
- line2func_list = collections.defaultdict(list)
-
- subs = {
- "%s": ti.path,
- "%t": tempfile.NamedTemporaryFile().name,
- "%S": os.path.dirname(ti.path),
- }
-
- for l in ti.run_lines:
- commands = [cmd.strip() for cmd in l.split("|")]
-
- triple_in_cmd = None
- m = common.TRIPLE_ARG_RE.search(commands[0])
- if m:
- triple_in_cmd = m.groups()[0]
-
- # Parse executable args.
- exec_args = shlex.split(commands[0])
- # Execute non-clang runline.
- if exec_args[0] not in SUBST:
- # Do lit-like substitutions.
- for s in subs:
- exec_args = [
- i.replace(s, subs[s]) if s in i else i for i in exec_args
- ]
- run_list.append((None, exec_args, None, None))
- continue
- # This is a clang runline, apply %clang substitution rule, do lit-like substitutions,
- # and append args.clang_args
- clang_args = exec_args
- clang_args[0:1] = SUBST[clang_args[0]]
- for s in subs:
- clang_args = [
- i.replace(s, subs[s]) if s in i else i for i in clang_args
- ]
- clang_args += ti.args.clang_args
-
- # Extract -check-prefix in FileCheck args
- filecheck_cmd = commands[-1]
- common.verify_filecheck_prefixes(filecheck_cmd)
- if not filecheck_cmd.startswith("FileCheck "):
- # Execute non-filechecked clang runline.
- exe = [ti.args.clang] + clang_args
- run_list.append((None, exe, None, None))
- continue
+def update_test(ti: common.TestInfo):
+ # Build a list of filechecked and non-filechecked RUN lines.
+ run_list = []
+ line2func_list = collections.defaultdict(list)
- check_prefixes = common.get_check_prefixes(filecheck_cmd)
- run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
+ subs = {
+ "%s": ti.path,
+ "%t": tempfile.NamedTemporaryFile().name,
+ "%S": os.path.dirname(ti.path),
+ }
- # Execute clang, generate LLVM IR, and extract functions.
+ for l in ti.run_lines:
+ commands = [cmd.strip() for cmd in l.split("|")]
- # Store only filechecked runlines.
- filecheck_run_list = [i for i in run_list if i[0]]
- ginfo = common.make_ir_generalizer(
- ti.args.version, ti.args.check_globals == "none"
- )
- builder = common.FunctionTestBuilder(
- run_list=filecheck_run_list,
- flags=ti.args,
- scrubber_args=[],
- path=ti.path,
- ginfo=ginfo,
- )
+ triple_in_cmd = None
+ m = common.TRIPLE_ARG_RE.search(commands[0])
+ if m:
+ triple_in_cmd = m.groups()[0]
- for prefixes, args, extra_commands, triple_in_cmd in run_list:
- # Execute non-filechecked runline.
- if not prefixes:
- print(
- "NOTE: Executing non-FileChecked RUN line: " + " ".join(args),
- file=sys.stderr,
- )
- exec_run_line(args)
- continue
+ # Parse executable args.
+ exec_args = shlex.split(commands[0])
+ # Execute non-clang runline.
+ if exec_args[0] not in SUBST:
+ # Do lit-like substitutions.
+ for s in subs:
+ exec_args = [i.replace(s, subs[s]) if s in i else i for i in exec_args]
+ run_list.append((None, exec_args, None, None))
+ continue
+ # This is a clang runline, apply %clang substitution rule, do lit-like substitutions,
+ # and append args.clang_args
+ clang_args = exec_args
+ clang_args[0:1] = SUBST[clang_args[0]]
+ for s in subs:
+ clang_args = [i.replace(s, subs[s]) if s in i else i for i in clang_args]
+ clang_args += ti.args.clang_args
+
+ # Extract -check-prefix in FileCheck args
+ filecheck_cmd = commands[-1]
+ common.verify_filecheck_prefixes(filecheck_cmd)
+ if not filecheck_cmd.startswith("FileCheck "):
+ # Execute non-filechecked clang runline.
+ exe = [ti.args.clang] + clang_args
+ run_list.append((None, exe, None, None))
+ continue
+
+ check_prefixes = common.get_check_prefixes(filecheck_cmd)
+ run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
+
+ # Execute clang, generate LLVM IR, and extract functions.
+
+ # Store only filechecked runlines.
+ filecheck_run_list = [i for i in run_list if i[0]]
+ ginfo = common.make_ir_generalizer(ti.args.version, ti.args.check_globals == "none")
+ builder = common.FunctionTestBuilder(
+ run_list=filecheck_run_list,
+ flags=ti.args,
+ scrubber_args=[],
+ path=ti.path,
+ ginfo=ginfo,
+ )
- clang_args = args
- common.debug("Extracted clang cmd: clang {}".format(clang_args))
- common.debug("Extracted FileCheck prefixes: {}".format(prefixes))
-
- # Invoke external tool and extract function bodies.
- raw_tool_output = common.invoke_tool(ti.args.clang, clang_args, ti.path)
- get_function_body(
- builder,
- ti.args,
- ti.path,
- clang_args,
- extra_commands,
- prefixes,
- raw_tool_output,
+ for prefixes, args, extra_commands, triple_in_cmd in run_list:
+ # Execute non-filechecked runline.
+ if not prefixes:
+ print(
+ "NOTE: Executing non-FileChecked RUN line: " + " ".join(args),
+ file=sys.stderr,
)
-
- # Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
- # mangled names. Forward all clang args for now.
- for k, v in get_line2func_list(
- ti.args, clang_args, common.get_globals_name_prefix(raw_tool_output)
- ).items():
- line2func_list[k].extend(v)
-
- func_dict = builder.finish_and_get_func_dict()
- global_vars_seen_dict = {}
- prefix_set = set([prefix for p in filecheck_run_list for prefix in p[0]])
- output_lines = []
- has_checked_pre_function_globals = False
-
- include_generated_funcs = common.find_arg_in_test(
- ti,
- lambda args: ti.args.include_generated_funcs,
- "--include-generated-funcs",
- True,
+ exec_run_line(args)
+ continue
+
+ clang_args = args
+ common.debug("Extracted clang cmd: clang {}".format(clang_args))
+ common.debug("Extracted FileCheck prefixes: {}".format(prefixes))
+
+ # Invoke external tool and extract function bodies.
+ raw_tool_output = common.invoke_tool(ti.args.clang, clang_args, ti.path)
+ get_function_body(
+ builder,
+ ti.args,
+ ti.path,
+ clang_args,
+ extra_commands,
+ prefixes,
+ raw_tool_output,
)
- generated_prefixes = []
- if include_generated_funcs:
- # Generate the appropriate checks for each function. We need to emit
- # these in the order according to the generated output so that CHECK-LABEL
- # works properly. func_order provides that.
-
- # It turns out that when clang generates functions (for example, with
- # -fopenmp), it can sometimes cause functions to be re-ordered in the
- # output, even functions that exist in the source file. Therefore we
- # can't insert check lines before each source function and instead have to
- # put them at the end. So the first thing to do is dump out the source
- # lines.
- common.dump_input_lines(output_lines, ti, prefix_set, "//")
-
- # Now generate all the checks.
- def check_generator(my_output_lines, prefixes, func):
- return common.add_ir_checks(
- my_output_lines,
- "//",
- prefixes,
- func_dict,
- func,
- False,
- ti.args.function_signature,
- ginfo,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- )
- if ti.args.check_globals != 'none':
- generated_prefixes.extend(
- common.add_global_checks(
- builder.global_var_dict(),
- "//",
- run_list,
- output_lines,
- ginfo,
- global_vars_seen_dict,
- False,
- True,
- ti.args.check_globals,
- )
- )
- generated_prefixes.extend(
- common.add_checks_at_end(
- output_lines,
- filecheck_run_list,
- builder.func_order(),
- "//",
- lambda my_output_lines, prefixes, func: check_generator(
- my_output_lines, prefixes, func
- ),
- )
+ # Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
+ # mangled names. Forward all clang args for now.
+ for k, v in get_line2func_list(
+ ti.args, clang_args, common.get_globals_name_prefix(raw_tool_output)
+ ).items():
+ line2func_list[k].extend(v)
+
+ func_dict = builder.finish_and_get_func_dict()
+ global_vars_seen_dict = {}
+ prefix_set = set([prefix for p in filecheck_run_list for prefix in p[0]])
+ output_lines = []
+ has_checked_pre_function_globals = False
+
+ include_generated_funcs = common.find_arg_in_test(
+ ti,
+ lambda args: ti.args.include_generated_funcs,
+ "--include-generated-funcs",
+ True,
+ )
+ generated_prefixes = []
+ if include_generated_funcs:
+ # Generate the appropriate checks for each function. We need to emit
+ # these in the order according to the generated output so that CHECK-LABEL
+ # works properly. func_order provides that.
+
+ # It turns out that when clang generates functions (for example, with
+ # -fopenmp), it can sometimes cause functions to be re-ordered in the
+ # output, even functions that exist in the source file. Therefore we
+ # can't insert check lines before each source function and instead have to
+ # put them at the end. So the first thing to do is dump out the source
+ # lines.
+ common.dump_input_lines(output_lines, ti, prefix_set, "//")
+
+ # Now generate all the checks.
+ def check_generator(my_output_lines, prefixes, func):
+ return common.add_ir_checks(
+ my_output_lines,
+ "//",
+ prefixes,
+ func_dict,
+ func,
+ False,
+ ti.args.function_signature,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
)
- else:
- # Normal mode. Put checks before each source function.
- for line_info in ti.iterlines(output_lines):
- idx = line_info.line_number
- line = line_info.line
- args = line_info.args
- include_line = True
- m = common.CHECK_RE.match(line)
- if m and m.group(1) in prefix_set:
- continue # Don't append the existing CHECK lines
- # Skip special separator comments added by commmon.add_global_checks.
- if line.strip() == "//" + common.SEPARATOR:
- continue
- if idx in line2func_list:
- added = set()
- for spell, mangled, search in line2func_list[idx]:
- # One line may contain multiple function declarations.
- # Skip if the mangled name has been added before.
- # The line number may come from an included file, we simply require
- # the search string (normally the function's spelling name, but is
- # the class's spelling name for class specializations) to appear on
- # the line to exclude functions from other files.
- if mangled in added or search not in line:
- continue
- if args.functions is None or any(
- re.search(regex, spell) for regex in args.functions
- ):
- last_line = output_lines[-1].strip()
- while last_line == "//":
- # Remove the comment line since we will generate a new comment
- # line as part of common.add_ir_checks()
- output_lines.pop()
- last_line = output_lines[-1].strip()
- if (
- ti.args.check_globals != 'none'
- and not has_checked_pre_function_globals
- ):
- generated_prefixes.extend(
- common.add_global_checks(
- builder.global_var_dict(),
- "//",
- run_list,
- output_lines,
- ginfo,
- global_vars_seen_dict,
- False,
- True,
- ti.args.check_globals,
- )
- )
- has_checked_pre_function_globals = True
- if added:
- output_lines.append("//")
- added.add(mangled)
- generated_prefixes.extend(
- common.add_ir_checks(
- output_lines,
- "//",
- filecheck_run_list,
- func_dict,
- mangled,
- False,
- args.function_signature,
- ginfo,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- )
- )
- if line.rstrip("\n") == "//":
- include_line = False
- if include_line:
- output_lines.append(line.rstrip("\n"))
-
- if ti.args.check_globals != 'none':
+ if ti.args.check_globals != "none":
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
@@ -562,19 +449,138 @@ def check_generator(my_output_lines, prefixes, func):
ginfo,
global_vars_seen_dict,
False,
- False,
+ True,
ti.args.check_globals,
)
)
- if ti.args.gen_unused_prefix_body:
- output_lines.extend(
- ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
+ generated_prefixes.extend(
+ common.add_checks_at_end(
+ output_lines,
+ filecheck_run_list,
+ builder.func_order(),
+ "//",
+ lambda my_output_lines, prefixes, func: check_generator(
+ my_output_lines, prefixes, func
+ ),
)
- common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
- with open(ti.path, "wb") as f:
- f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+ )
+ else:
+ # Normal mode. Put checks before each source function.
+ for line_info in ti.iterlines(output_lines):
+ idx = line_info.line_number
+ line = line_info.line
+ args = line_info.args
+ include_line = True
+ m = common.CHECK_RE.match(line)
+ if m and m.group(1) in prefix_set:
+ continue # Don't append the existing CHECK lines
+ # Skip special separator comments added by common.add_global_checks.
+ if line.strip() == "//" + common.SEPARATOR:
+ continue
+ if idx in line2func_list:
+ added = set()
+ for spell, mangled, search in line2func_list[idx]:
+ # One line may contain multiple function declarations.
+ # Skip if the mangled name has been added before.
+ # The line number may come from an included file, we simply require
+ # the search string (normally the function's spelling name, but is
+ # the class's spelling name for class specializations) to appear on
+ # the line to exclude functions from other files.
+ if mangled in added or search not in line:
+ continue
+ if args.functions is None or any(
+ re.search(regex, spell) for regex in args.functions
+ ):
+ last_line = output_lines[-1].strip()
+ while last_line == "//":
+ # Remove the comment line since we will generate a new comment
+ # line as part of common.add_ir_checks()
+ output_lines.pop()
+ last_line = output_lines[-1].strip()
+ if (
+ ti.args.check_globals != "none"
+ and not has_checked_pre_function_globals
+ ):
+ generated_prefixes.extend(
+ common.add_global_checks(
+ builder.global_var_dict(),
+ "//",
+ run_list,
+ output_lines,
+ ginfo,
+ global_vars_seen_dict,
+ False,
+ True,
+ ti.args.check_globals,
+ )
+ )
+ has_checked_pre_function_globals = True
+ if added:
+ output_lines.append("//")
+ added.add(mangled)
+ generated_prefixes.extend(
+ common.add_ir_checks(
+ output_lines,
+ "//",
+ filecheck_run_list,
+ func_dict,
+ mangled,
+ False,
+ args.function_signature,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
+ )
+ )
+ if line.rstrip("\n") == "//":
+ include_line = False
+
+ if include_line:
+ output_lines.append(line.rstrip("\n"))
+
+ if ti.args.check_globals != "none":
+ generated_prefixes.extend(
+ common.add_global_checks(
+ builder.global_var_dict(),
+ "//",
+ run_list,
+ output_lines,
+ ginfo,
+ global_vars_seen_dict,
+ False,
+ False,
+ ti.args.check_globals,
+ )
+ )
+ if ti.args.gen_unused_prefix_body:
+ output_lines.extend(
+ ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
+ )
+ common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+ with open(ti.path, "wb") as f:
+ f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
- return 0
+def main():
+ initial_args, parser = config()
+ script_name = os.path.basename(__file__)
+
+ returncode = 0
+ for ti in common.itertests(
+ initial_args.tests,
+ parser,
+ "utils/" + script_name,
+ comment_prefix="//",
+ argparse_callback=infer_dependent_args,
+ ):
+ try:
+ update_test(ti)
+ except Exception:
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+
+ return returncode
if __name__ == "__main__":
diff --git a/llvm/utils/update_llc_test_checks.py b/llvm/utils/update_llc_test_checks.py
index 3e9380d95e3f6..07216d7dbfbb2 100755
--- a/llvm/utils/update_llc_test_checks.py
+++ b/llvm/utils/update_llc_test_checks.py
@@ -9,8 +9,11 @@
from __future__ import print_function
+from sys import stderr
+from traceback import print_exc
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
+import sys
from UpdateTestChecks import common
@@ -21,6 +24,232 @@
]
+def update_test(ti: common.TestInfo):
+ triple_in_ir = None
+ for l in ti.input_lines:
+ m = common.TRIPLE_IR_RE.match(l)
+ if m:
+ triple_in_ir = m.groups()[0]
+ break
+
+ run_list = []
+ for l in ti.run_lines:
+ if "|" not in l:
+ common.warn("Skipping unparsable RUN line: " + l)
+ continue
+
+ commands = [cmd.strip() for cmd in l.split("|")]
+ assert len(commands) >= 2
+ preprocess_cmd = None
+ if len(commands) > 2:
+ preprocess_cmd = " | ".join(commands[:-2])
+ llc_cmd = commands[-2]
+ filecheck_cmd = commands[-1]
+ llc_tool = llc_cmd.split(" ")[0]
+
+ triple_in_cmd = None
+ m = common.TRIPLE_ARG_RE.search(llc_cmd)
+ if m:
+ triple_in_cmd = m.groups()[0]
+
+ march_in_cmd = ti.args.default_march
+ m = common.MARCH_ARG_RE.search(llc_cmd)
+ if m:
+ march_in_cmd = m.groups()[0]
+
+ m = common.DEBUG_ONLY_ARG_RE.search(llc_cmd)
+ if m and m.groups()[0] == "isel":
+ from UpdateTestChecks import isel as output_type
+ else:
+ from UpdateTestChecks import asm as output_type
+
+ common.verify_filecheck_prefixes(filecheck_cmd)
+
+ llc_like_tools = LLC_LIKE_TOOLS[:]
+ if ti.args.tool:
+ llc_like_tools.append(ti.args.tool)
+ if llc_tool not in llc_like_tools:
+ common.warn("Skipping non-llc RUN line: " + l)
+ continue
+
+ if not filecheck_cmd.startswith("FileCheck "):
+ common.warn("Skipping non-FileChecked RUN line: " + l)
+ continue
+
+ llc_cmd_args = llc_cmd[len(llc_tool) :].strip()
+ llc_cmd_args = llc_cmd_args.replace("< %s", "").replace("%s", "").strip()
+ if ti.path.endswith(".mir"):
+ llc_cmd_args += " -x mir"
+ check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+ # FIXME: We should use multiple check prefixes to common check lines. For
+ # now, we just ignore all but the last.
+ run_list.append(
+ (
+ check_prefixes,
+ llc_tool,
+ llc_cmd_args,
+ preprocess_cmd,
+ triple_in_cmd,
+ march_in_cmd,
+ )
+ )
+
+ if ti.path.endswith(".mir"):
+ check_indent = " "
+ else:
+ check_indent = ""
+
+ ginfo = common.make_asm_generalizer(version=1)
+ builder = common.FunctionTestBuilder(
+ run_list=run_list,
+ flags=type(
+ "",
+ (object,),
+ {
+ "verbose": ti.args.verbose,
+ "filters": ti.args.filters,
+ "function_signature": False,
+ "check_attributes": False,
+ "replace_value_regex": [],
+ },
+ ),
+ scrubber_args=[ti.args],
+ path=ti.path,
+ ginfo=ginfo,
+ )
+
+ for (
+ prefixes,
+ llc_tool,
+ llc_args,
+ preprocess_cmd,
+ triple_in_cmd,
+ march_in_cmd,
+ ) in run_list:
+ common.debug("Extracted LLC cmd:", llc_tool, llc_args)
+ common.debug("Extracted FileCheck prefixes:", str(prefixes))
+
+ raw_tool_output = common.invoke_tool(
+ ti.args.llc_binary or llc_tool,
+ llc_args,
+ ti.path,
+ preprocess_cmd,
+ verbose=ti.args.verbose,
+ )
+ triple = triple_in_cmd or triple_in_ir
+ if not triple:
+ triple = common.get_triple_from_march(march_in_cmd)
+
+ scrubber, function_re = output_type.get_run_handler(triple)
+ builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes)
+ builder.processed_prefixes(prefixes)
+
+ func_dict = builder.finish_and_get_func_dict()
+ global_vars_seen_dict = {}
+
+ is_in_function = False
+ is_in_function_start = False
+ func_name = None
+ prefix_set = set([prefix for p in run_list for prefix in p[0]])
+ common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
+ output_lines = []
+
+ include_generated_funcs = common.find_arg_in_test(
+ ti,
+ lambda args: ti.args.include_generated_funcs,
+ "--include-generated-funcs",
+ True,
+ )
+
+ generated_prefixes = []
+ if include_generated_funcs:
+ # Generate the appropriate checks for each function. We need to emit
+ # these in the order according to the generated output so that CHECK-LABEL
+ # works properly. func_order provides that.
+
+ # We can't predict where various passes might insert functions so we can't
+ # be sure the input function order is maintained. Therefore, first spit
+ # out all the source lines.
+ common.dump_input_lines(output_lines, ti, prefix_set, ";")
+
+ # Now generate all the checks.
+ generated_prefixes = common.add_checks_at_end(
+ output_lines,
+ run_list,
+ builder.func_order(),
+ check_indent + ";",
+ lambda my_output_lines, prefixes, func: output_type.add_checks(
+ my_output_lines,
+ check_indent + ";",
+ prefixes,
+ func_dict,
+ func,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
+ ),
+ )
+ else:
+ for input_info in ti.iterlines(output_lines):
+ input_line = input_info.line
+ args = input_info.args
+ if is_in_function_start:
+ if input_line == "":
+ continue
+ if input_line.lstrip().startswith(";"):
+ m = common.CHECK_RE.match(input_line)
+ if not m or m.group(1) not in prefix_set:
+ output_lines.append(input_line)
+ continue
+
+ # Print out the various check lines here.
+ generated_prefixes.extend(
+ output_type.add_checks(
+ output_lines,
+ check_indent + ";",
+ run_list,
+ func_dict,
+ func_name,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
+ )
+ )
+ is_in_function_start = False
+
+ if is_in_function:
+ if common.should_add_line_to_output(input_line, prefix_set):
+ # This input line of the function body will go as-is into the output.
+ output_lines.append(input_line)
+ else:
+ continue
+ if input_line.strip() == "}":
+ is_in_function = False
+ continue
+
+ # If it's outside a function, it just gets copied to the output.
+ output_lines.append(input_line)
+
+ m = common.IR_FUNCTION_RE.match(input_line)
+ if not m:
+ continue
+ func_name = m.group(1)
+ if args.function is not None and func_name != args.function:
+ # When filtering on a specific function, skip all others.
+ continue
+ is_in_function = is_in_function_start = True
+
+ if ti.args.gen_unused_prefix_body:
+ output_lines.extend(
+ ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
+ )
+
+ common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+ with open(ti.path, "wb") as f:
+ f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
@@ -71,233 +300,18 @@ def main():
script_name = os.path.basename(__file__)
+ returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
- triple_in_ir = None
- for l in ti.input_lines:
- m = common.TRIPLE_IR_RE.match(l)
- if m:
- triple_in_ir = m.groups()[0]
- break
-
- run_list = []
- for l in ti.run_lines:
- if "|" not in l:
- common.warn("Skipping unparsable RUN line: " + l)
- continue
-
- commands = [cmd.strip() for cmd in l.split("|")]
- assert len(commands) >= 2
- preprocess_cmd = None
- if len(commands) > 2:
- preprocess_cmd = " | ".join(commands[:-2])
- llc_cmd = commands[-2]
- filecheck_cmd = commands[-1]
- llc_tool = llc_cmd.split(" ")[0]
-
- triple_in_cmd = None
- m = common.TRIPLE_ARG_RE.search(llc_cmd)
- if m:
- triple_in_cmd = m.groups()[0]
-
- march_in_cmd = ti.args.default_march
- m = common.MARCH_ARG_RE.search(llc_cmd)
- if m:
- march_in_cmd = m.groups()[0]
-
- m = common.DEBUG_ONLY_ARG_RE.search(llc_cmd)
- if m and m.groups()[0] == "isel":
- from UpdateTestChecks import isel as output_type
- else:
- from UpdateTestChecks import asm as output_type
-
- common.verify_filecheck_prefixes(filecheck_cmd)
-
- llc_like_tools = LLC_LIKE_TOOLS[:]
- if ti.args.tool:
- llc_like_tools.append(ti.args.tool)
- if llc_tool not in llc_like_tools:
- common.warn("Skipping non-llc RUN line: " + l)
- continue
-
- if not filecheck_cmd.startswith("FileCheck "):
- common.warn("Skipping non-FileChecked RUN line: " + l)
- continue
-
- llc_cmd_args = llc_cmd[len(llc_tool) :].strip()
- llc_cmd_args = llc_cmd_args.replace("< %s", "").replace("%s", "").strip()
- if ti.path.endswith(".mir"):
- llc_cmd_args += " -x mir"
- check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
- # FIXME: We should use multiple check prefixes to common check lines. For
- # now, we just ignore all but the last.
- run_list.append(
- (
- check_prefixes,
- llc_tool,
- llc_cmd_args,
- preprocess_cmd,
- triple_in_cmd,
- march_in_cmd,
- )
- )
-
- if ti.path.endswith(".mir"):
- check_indent = " "
- else:
- check_indent = ""
-
- ginfo = common.make_asm_generalizer(version=1)
- builder = common.FunctionTestBuilder(
- run_list=run_list,
- flags=type(
- "",
- (object,),
- {
- "verbose": ti.args.verbose,
- "filters": ti.args.filters,
- "function_signature": False,
- "check_attributes": False,
- "replace_value_regex": [],
- },
- ),
- scrubber_args=[ti.args],
- path=ti.path,
- ginfo=ginfo,
- )
-
- for (
- prefixes,
- llc_tool,
- llc_args,
- preprocess_cmd,
- triple_in_cmd,
- march_in_cmd,
- ) in run_list:
- common.debug("Extracted LLC cmd:", llc_tool, llc_args)
- common.debug("Extracted FileCheck prefixes:", str(prefixes))
-
- raw_tool_output = common.invoke_tool(
- ti.args.llc_binary or llc_tool,
- llc_args,
- ti.path,
- preprocess_cmd,
- verbose=ti.args.verbose,
- )
- triple = triple_in_cmd or triple_in_ir
- if not triple:
- triple = common.get_triple_from_march(march_in_cmd)
-
- scrubber, function_re = output_type.get_run_handler(triple)
- builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes)
- builder.processed_prefixes(prefixes)
-
- func_dict = builder.finish_and_get_func_dict()
- global_vars_seen_dict = {}
-
- is_in_function = False
- is_in_function_start = False
- func_name = None
- prefix_set = set([prefix for p in run_list for prefix in p[0]])
- common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
- output_lines = []
-
- include_generated_funcs = common.find_arg_in_test(
- ti,
- lambda args: ti.args.include_generated_funcs,
- "--include-generated-funcs",
- True,
- )
-
- generated_prefixes = []
- if include_generated_funcs:
- # Generate the appropriate checks for each function. We need to emit
- # these in the order according to the generated output so that CHECK-LABEL
- # works properly. func_order provides that.
-
- # We can't predict where various passes might insert functions so we can't
- # be sure the input function order is maintained. Therefore, first spit
- # out all the source lines.
- common.dump_input_lines(output_lines, ti, prefix_set, ";")
-
- # Now generate all the checks.
- generated_prefixes = common.add_checks_at_end(
- output_lines,
- run_list,
- builder.func_order(),
- check_indent + ";",
- lambda my_output_lines, prefixes, func: output_type.add_checks(
- my_output_lines,
- check_indent + ";",
- prefixes,
- func_dict,
- func,
- ginfo,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- ),
- )
- else:
- for input_info in ti.iterlines(output_lines):
- input_line = input_info.line
- args = input_info.args
- if is_in_function_start:
- if input_line == "":
- continue
- if input_line.lstrip().startswith(";"):
- m = common.CHECK_RE.match(input_line)
- if not m or m.group(1) not in prefix_set:
- output_lines.append(input_line)
- continue
-
- # Print out the various check lines here.
- generated_prefixes.extend(
- output_type.add_checks(
- output_lines,
- check_indent + ";",
- run_list,
- func_dict,
- func_name,
- ginfo,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- )
- )
- is_in_function_start = False
-
- if is_in_function:
- if common.should_add_line_to_output(input_line, prefix_set):
- # This input line of the function body will go as-is into the output.
- output_lines.append(input_line)
- else:
- continue
- if input_line.strip() == "}":
- is_in_function = False
- continue
-
- # If it's outside a function, it just gets copied to the output.
- output_lines.append(input_line)
-
- m = common.IR_FUNCTION_RE.match(input_line)
- if not m:
- continue
- func_name = m.group(1)
- if args.function is not None and func_name != args.function:
- # When filtering on a specific function, skip all others.
- continue
- is_in_function = is_in_function_start = True
-
- if ti.args.gen_unused_prefix_body:
- output_lines.extend(
- ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
- )
-
- common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
- with open(ti.path, "wb") as f:
- f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+ try:
+ update_test(ti)
+ except Exception:
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+ return returncode
if __name__ == "__main__":
- main()
+ sys.exit(main())
diff --git a/llvm/utils/update_mc_test_checks.py b/llvm/utils/update_mc_test_checks.py
index c8a40b37088ae..a419f1ce8a73c 100755
--- a/llvm/utils/update_mc_test_checks.py
+++ b/llvm/utils/update_mc_test_checks.py
@@ -5,14 +5,16 @@
from __future__ import print_function
+from sys import stderr
+from traceback import print_exc
import argparse
import functools
import os # Used to advertise this file's name ("autogenerated_note").
-
-from UpdateTestChecks import common
-
import subprocess
import re
+import sys
+
+from UpdateTestChecks import common
mc_LIKE_TOOLS = [
"llvm-mc",
@@ -121,6 +123,278 @@ def getErrCheckLine(prefix, output, mc_mode, line_offset=1):
)
+def update_test(ti: common.TestInfo):
+ if ti.path.endswith(".s"):
+ mc_mode = "asm"
+ elif ti.path.endswith(".txt"):
+ mc_mode = "dasm"
+
+ if ti.args.sort:
+ # main() ignores this function's return value, so raise instead of
+ raise RuntimeError("sorting with dasm(.txt) file is not supported!")
+
+ else:
+ common.warn("Expected .s and .txt, Skipping file : ", ti.path)
+ return
+
+ triple_in_ir = None
+ for l in ti.input_lines:
+ m = common.TRIPLE_IR_RE.match(l)
+ if m:
+ triple_in_ir = m.groups()[0]
+ break
+
+ run_list = []
+ for l in ti.run_lines:
+ if "|" not in l:
+ common.warn("Skipping unparsable RUN line: " + l)
+ continue
+
+ commands = [cmd.strip() for cmd in l.split("|")]
+ assert len(commands) >= 2
+ mc_cmd = " | ".join(commands[:-1])
+ filecheck_cmd = commands[-1]
+
+ # special handling for negating exit status
+ # if not is used in runline, disable rc check, since
+ # the command might or might not
+ # return non-zero code on a single line run
+ check_rc = True
+ mc_cmd_args = mc_cmd.strip().split()
+ if mc_cmd_args[0] == "not":
+ check_rc = False
+ mc_tool = mc_cmd_args[1]
+ mc_cmd = mc_cmd[len(mc_cmd_args[0]) :].strip()
+ else:
+ mc_tool = mc_cmd_args[0]
+
+ triple_in_cmd = None
+ m = common.TRIPLE_ARG_RE.search(mc_cmd)
+ if m:
+ triple_in_cmd = m.groups()[0]
+
+ march_in_cmd = ti.args.default_march
+ m = common.MARCH_ARG_RE.search(mc_cmd)
+ if m:
+ march_in_cmd = m.groups()[0]
+
+ common.verify_filecheck_prefixes(filecheck_cmd)
+
+ mc_like_tools = mc_LIKE_TOOLS[:]
+ if ti.args.tool:
+ mc_like_tools.append(ti.args.tool)
+ if mc_tool not in mc_like_tools:
+ common.warn("Skipping non-mc RUN line: " + l)
+ continue
+
+ if not filecheck_cmd.startswith("FileCheck "):
+ common.warn("Skipping non-FileChecked RUN line: " + l)
+ continue
+
+ mc_cmd_args = mc_cmd[len(mc_tool) :].strip()
+ mc_cmd_args = mc_cmd_args.replace("< %s", "").replace("%s", "").strip()
+ check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+ run_list.append(
+ (
+ check_prefixes,
+ mc_tool,
+ check_rc,
+ mc_cmd_args,
+ triple_in_cmd,
+ march_in_cmd,
+ )
+ )
+
+ # find all test line from input
+ testlines = [l for l in ti.input_lines if isTestLine(l, mc_mode)]
+ # remove duplicated lines to save running time
+ testlines = list(dict.fromkeys(testlines))
+ common.debug("Valid test line found: ", len(testlines))
+
+ run_list_size = len(run_list)
+ testnum = len(testlines)
+
+ raw_output = []
+ raw_prefixes = []
+ for (
+ prefixes,
+ mc_tool,
+ check_rc,
+ mc_args,
+ triple_in_cmd,
+ march_in_cmd,
+ ) in run_list:
+ common.debug("Extracted mc cmd:", mc_tool, mc_args)
+ common.debug("Extracted FileCheck prefixes:", str(prefixes))
+ common.debug("Extracted triple :", str(triple_in_cmd))
+ common.debug("Extracted march:", str(march_in_cmd))
+
+ triple = triple_in_cmd or triple_in_ir
+ if not triple:
+ triple = common.get_triple_from_march(march_in_cmd)
+
+ raw_output.append([])
+ for line in testlines:
+ # get output for each testline
+ out = invoke_tool(
+ ti.args.llvm_mc_binary or mc_tool,
+ check_rc,
+ mc_args,
+ line,
+ verbose=ti.args.verbose,
+ )
+ raw_output[-1].append(out)
+
+ common.debug("Collect raw tool lines:", str(len(raw_output[-1])))
+
+ raw_prefixes.append(prefixes)
+
+ output_lines = []
+ generated_prefixes = {}
+ used_prefixes = set()
+ prefix_set = set([prefix for p in run_list for prefix in p[0]])
+ common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
+
+ for test_id in range(testnum):
+ input_line = testlines[test_id]
+
+ # a {prefix : output, [runid] } dict
+ # insert output to a prefix-key dict, and do a max sorting
+ # to select the most-used prefix which share the same output string
+ p_dict = {}
+ for run_id in range(run_list_size):
+ out = raw_output[run_id][test_id]
+
+ if hasErr(out):
+ o = getErrString(out)
+ else:
+ o = getOutputString(out)
+
+ prefixes = raw_prefixes[run_id]
+
+ for p in prefixes:
+ if p not in p_dict:
+ p_dict[p] = o, [run_id]
+ else:
+ if p_dict[p] == (None, []):
+ continue
+
+ prev_o, run_ids = p_dict[p]
+ if o == prev_o:
+ run_ids.append(run_id)
+ p_dict[p] = o, run_ids
+ else:
+ # conflict, discard
+ p_dict[p] = None, []
+
+ p_dict_sorted = dict(sorted(p_dict.items(), key=lambda item: -len(item[1][1])))
+
+ # prefix is selected and generated with most shared output lines
+ # each run_id can only be used once
+ gen_prefix = ""
+ used_runid = set()
+
+ # line number diff between generated prefix and testline
+ line_offset = 1
+ for prefix, tup in p_dict_sorted.items():
+ o, run_ids = tup
+
+ if len(run_ids) == 0:
+ continue
+
+ skip = False
+ for i in run_ids:
+ if i in used_runid:
+ skip = True
+ else:
+ used_runid.add(i)
+ if not skip:
+ used_prefixes.add(prefix)
+
+ if hasErr(o):
+ newline = getErrCheckLine(prefix, o, mc_mode, line_offset)
+ else:
+ newline = getStdCheckLine(prefix, o, mc_mode)
+
+ if newline:
+ gen_prefix += newline
+ line_offset += 1
+
+ generated_prefixes[input_line] = gen_prefix.rstrip("\n")
+
+ # write output
+ for input_info in ti.iterlines(output_lines):
+ input_line = input_info.line
+ if input_line in testlines:
+ output_lines.append(input_line)
+ output_lines.append(generated_prefixes[input_line])
+
+ elif should_add_line_to_output(input_line, prefix_set, mc_mode):
+ output_lines.append(input_line)
+
+ if ti.args.unique or ti.args.sort:
+ # split with double newlines
+ test_units = "\n".join(output_lines).split("\n\n")
+
+ # select the key line for each test unit
+ test_dic = {}
+ for unit in test_units:
+ lines = unit.split("\n")
+ for l in lines:
+ # if contains multiple lines, use
+ # the first testline or runline as key
+ if isTestLine(l, mc_mode):
+ test_dic[unit] = l
+ break
+ if isRunLine(l):
+ test_dic[unit] = l
+ break
+
+ # unique
+ if ti.args.unique:
+ new_test_units = []
+ written_lines = set()
+ for unit in test_units:
+ # if not testline/runline, we just add it
+ if unit not in test_dic:
+ new_test_units.append(unit)
+ else:
+ if test_dic[unit] in written_lines:
+ common.debug("Duplicated test skipped: ", unit)
+ continue
+
+ written_lines.add(test_dic[unit])
+ new_test_units.append(unit)
+ test_units = new_test_units
+
+ # sort
+ if ti.args.sort:
+
+ def getkey(l):
+ # find key of test unit, otherwise use first line
+ if l in test_dic:
+ line = test_dic[l]
+ else:
+ line = l.split("\n")[0]
+
+ # runline placed on the top
+ return (not isRunLine(line), line)
+
+ test_units = sorted(test_units, key=getkey)
+
+ # join back to be output string
+ output_lines = "\n\n".join(test_units).split("\n")
+
+ # output
+ if ti.args.gen_unused_prefix_body:
+ output_lines.extend(ti.get_checks_for_unused_prefixes(run_list, used_prefixes))
+
+ common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+ with open(ti.path, "wb") as f:
+ f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
@@ -156,283 +430,18 @@ def main():
script_name = os.path.basename(__file__)
+ returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
- if ti.path.endswith(".s"):
- mc_mode = "asm"
- elif ti.path.endswith(".txt"):
- mc_mode = "dasm"
-
- if ti.args.sort:
- print("sorting with dasm(.txt) file is not supported!")
- return -1
-
- else:
- common.warn("Expected .s and .txt, Skipping file : ", ti.path)
- continue
-
- triple_in_ir = None
- for l in ti.input_lines:
- m = common.TRIPLE_IR_RE.match(l)
- if m:
- triple_in_ir = m.groups()[0]
- break
-
- run_list = []
- for l in ti.run_lines:
- if "|" not in l:
- common.warn("Skipping unparsable RUN line: " + l)
- continue
-
- commands = [cmd.strip() for cmd in l.split("|")]
- assert len(commands) >= 2
- mc_cmd = " | ".join(commands[:-1])
- filecheck_cmd = commands[-1]
-
- # special handling for negating exit status
- # if not is used in runline, disable rc check, since
- # the command might or might not
- # return non-zero code on a single line run
- check_rc = True
- mc_cmd_args = mc_cmd.strip().split()
- if mc_cmd_args[0] == "not":
- check_rc = False
- mc_tool = mc_cmd_args[1]
- mc_cmd = mc_cmd[len(mc_cmd_args[0]) :].strip()
- else:
- mc_tool = mc_cmd_args[0]
-
- triple_in_cmd = None
- m = common.TRIPLE_ARG_RE.search(mc_cmd)
- if m:
- triple_in_cmd = m.groups()[0]
-
- march_in_cmd = ti.args.default_march
- m = common.MARCH_ARG_RE.search(mc_cmd)
- if m:
- march_in_cmd = m.groups()[0]
-
- common.verify_filecheck_prefixes(filecheck_cmd)
-
- mc_like_tools = mc_LIKE_TOOLS[:]
- if ti.args.tool:
- mc_like_tools.append(ti.args.tool)
- if mc_tool not in mc_like_tools:
- common.warn("Skipping non-mc RUN line: " + l)
- continue
-
- if not filecheck_cmd.startswith("FileCheck "):
- common.warn("Skipping non-FileChecked RUN line: " + l)
- continue
-
- mc_cmd_args = mc_cmd[len(mc_tool) :].strip()
- mc_cmd_args = mc_cmd_args.replace("< %s", "").replace("%s", "").strip()
- check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
- run_list.append(
- (
- check_prefixes,
- mc_tool,
- check_rc,
- mc_cmd_args,
- triple_in_cmd,
- march_in_cmd,
- )
- )
-
- # find all test line from input
- testlines = [l for l in ti.input_lines if isTestLine(l, mc_mode)]
- # remove duplicated lines to save running time
- testlines = list(dict.fromkeys(testlines))
- common.debug("Valid test line found: ", len(testlines))
-
- run_list_size = len(run_list)
- testnum = len(testlines)
-
- raw_output = []
- raw_prefixes = []
- for (
- prefixes,
- mc_tool,
- check_rc,
- mc_args,
- triple_in_cmd,
- march_in_cmd,
- ) in run_list:
- common.debug("Extracted mc cmd:", mc_tool, mc_args)
- common.debug("Extracted FileCheck prefixes:", str(prefixes))
- common.debug("Extracted triple :", str(triple_in_cmd))
- common.debug("Extracted march:", str(march_in_cmd))
-
- triple = triple_in_cmd or triple_in_ir
- if not triple:
- triple = common.get_triple_from_march(march_in_cmd)
-
- raw_output.append([])
- for line in testlines:
- # get output for each testline
- out = invoke_tool(
- ti.args.llvm_mc_binary or mc_tool,
- check_rc,
- mc_args,
- line,
- verbose=ti.args.verbose,
- )
- raw_output[-1].append(out)
-
- common.debug("Collect raw tool lines:", str(len(raw_output[-1])))
-
- raw_prefixes.append(prefixes)
-
- output_lines = []
- generated_prefixes = {}
- used_prefixes = set()
- prefix_set = set([prefix for p in run_list for prefix in p[0]])
- common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
-
- for test_id in range(testnum):
- input_line = testlines[test_id]
-
- # a {prefix : output, [runid] } dict
- # insert output to a prefix-key dict, and do a max sorting
- # to select the most-used prefix which share the same output string
- p_dict = {}
- for run_id in range(run_list_size):
- out = raw_output[run_id][test_id]
-
- if hasErr(out):
- o = getErrString(out)
- else:
- o = getOutputString(out)
-
- prefixes = raw_prefixes[run_id]
-
- for p in prefixes:
- if p not in p_dict:
- p_dict[p] = o, [run_id]
- else:
- if p_dict[p] == (None, []):
- continue
-
- prev_o, run_ids = p_dict[p]
- if o == prev_o:
- run_ids.append(run_id)
- p_dict[p] = o, run_ids
- else:
- # conflict, discard
- p_dict[p] = None, []
-
- p_dict_sorted = dict(
- sorted(p_dict.items(), key=lambda item: -len(item[1][1]))
- )
-
- # prefix is selected and generated with most shared output lines
- # each run_id can only be used once
- gen_prefix = ""
- used_runid = set()
-
- # line number diff between generated prefix and testline
- line_offset = 1
- for prefix, tup in p_dict_sorted.items():
- o, run_ids = tup
-
- if len(run_ids) == 0:
- continue
-
- skip = False
- for i in run_ids:
- if i in used_runid:
- skip = True
- else:
- used_runid.add(i)
- if not skip:
- used_prefixes.add(prefix)
-
- if hasErr(o):
- newline = getErrCheckLine(prefix, o, mc_mode, line_offset)
- else:
- newline = getStdCheckLine(prefix, o, mc_mode)
-
- if newline:
- gen_prefix += newline
- line_offset += 1
-
- generated_prefixes[input_line] = gen_prefix.rstrip("\n")
-
- # write output
- for input_info in ti.iterlines(output_lines):
- input_line = input_info.line
- if input_line in testlines:
- output_lines.append(input_line)
- output_lines.append(generated_prefixes[input_line])
-
- elif should_add_line_to_output(input_line, prefix_set, mc_mode):
- output_lines.append(input_line)
-
- if ti.args.unique or ti.args.sort:
- # split with double newlines
- test_units = "\n".join(output_lines).split("\n\n")
-
- # select the key line for each test unit
- test_dic = {}
- for unit in test_units:
- lines = unit.split("\n")
- for l in lines:
- # if contains multiple lines, use
- # the first testline or runline as key
- if isTestLine(l, mc_mode):
- test_dic[unit] = l
- break
- if isRunLine(l):
- test_dic[unit] = l
- break
-
- # unique
- if ti.args.unique:
- new_test_units = []
- written_lines = set()
- for unit in test_units:
- # if not testline/runline, we just add it
- if unit not in test_dic:
- new_test_units.append(unit)
- else:
- if test_dic[unit] in written_lines:
- common.debug("Duplicated test skipped: ", unit)
- continue
-
- written_lines.add(test_dic[unit])
- new_test_units.append(unit)
- test_units = new_test_units
-
- # sort
- if ti.args.sort:
-
- def getkey(l):
- # find key of test unit, otherwise use first line
- if l in test_dic:
- line = test_dic[l]
- else:
- line = l.split("\n")[0]
-
- # runline placed on the top
- return (not isRunLine(line), line)
-
- test_units = sorted(test_units, key=getkey)
-
- # join back to be output string
- output_lines = "\n\n".join(test_units).split("\n")
-
- # output
- if ti.args.gen_unused_prefix_body:
- output_lines.extend(
- ti.get_checks_for_unused_prefixes(run_list, used_prefixes)
- )
-
- common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
- with open(ti.path, "wb") as f:
- f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+ try:
+ update_test(ti)
+ except Exception:
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+ return returncode
if __name__ == "__main__":
- main()
+ sys.exit(main())
diff --git a/llvm/utils/update_mca_test_checks.py b/llvm/utils/update_mca_test_checks.py
index 486cb66b827f3..67494566bf265 100755
--- a/llvm/utils/update_mca_test_checks.py
+++ b/llvm/utils/update_mca_test_checks.py
@@ -6,6 +6,8 @@
FileCheck patterns.
"""
+from sys import stderr
+from traceback import print_exc
import argparse
from collections import defaultdict
import glob
@@ -570,13 +572,16 @@ def main():
if "llvm-mca" not in os.path.basename(args.llvm_mca_binary):
_warn("unexpected binary name: {}".format(args.llvm_mca_binary))
+ returncode = 0
for ti in common.itertests(args.tests, parser, script_name=script_name):
try:
update_test_file(ti.args, ti.path, ti.test_autogenerated_note)
except Exception:
- common.warn("Error processing file", test_file=ti.path)
- raise
- return 0
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+ return returncode
+
if __name__ == "__main__":
try:
diff --git a/llvm/utils/update_mir_test_checks.py b/llvm/utils/update_mir_test_checks.py
index 2ee156dd7faf7..86147034d946b 100755
--- a/llvm/utils/update_mir_test_checks.py
+++ b/llvm/utils/update_mir_test_checks.py
@@ -20,6 +20,8 @@
from __future__ import print_function
+from sys import stderr
+from traceback import print_exc
import argparse
import collections
import glob
@@ -495,13 +497,16 @@ def main():
args = common.parse_commandline_args(parser)
script_name = os.path.basename(__file__)
+ returncode = 0
for ti in common.itertests(args.tests, parser, script_name="utils/" + script_name):
try:
update_test_file(ti.args, ti.path, ti.test_autogenerated_note)
- except Exception:
- common.warn("Error processing file", test_file=ti.path)
- raise
+ except Exception:
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+ return returncode
if __name__ == "__main__":
- main()
+ sys.exit(main())
diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py
index 4f83c02f19f6f..f4b45c5581c71 100755
--- a/llvm/utils/update_test_checks.py
+++ b/llvm/utils/update_test_checks.py
@@ -34,6 +34,8 @@
from __future__ import print_function
+from sys import stderr
+from traceback import print_exc
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import re
@@ -42,6 +44,260 @@
from UpdateTestChecks import common
+def update_test(ti: common.TestInfo):
+ # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
+ if ti.args.scrub_attributes:
+ common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
+ common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
+ )
+ else:
+ common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE
+
+ tool_basename = ti.args.tool
+
+ prefix_list = []
+ for l in ti.run_lines:
+ if "|" not in l:
+ common.warn("Skipping unparsable RUN line: " + l)
+ continue
+
+ cropped_content = l
+ if "%if" in l:
+ match = re.search(r"%{\s*(.*?)\s*%}", l)
+ if match:
+ cropped_content = match.group(1)
+
+ commands = [cmd.strip() for cmd in cropped_content.split("|")]
+ assert len(commands) >= 2
+ preprocess_cmd = None
+ if len(commands) > 2:
+ preprocess_cmd = " | ".join(commands[:-2])
+ tool_cmd = commands[-2]
+ filecheck_cmd = commands[-1]
+ common.verify_filecheck_prefixes(filecheck_cmd)
+ if not tool_cmd.startswith(tool_basename + " "):
+ common.warn("Skipping non-%s RUN line: %s" % (tool_basename, l))
+ continue
+
+ if not filecheck_cmd.startswith("FileCheck "):
+ common.warn("Skipping non-FileChecked RUN line: " + l)
+ continue
+
+ tool_cmd_args = tool_cmd[len(tool_basename) :].strip()
+ tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
+ check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+ # FIXME: We should use multiple check prefixes to common check lines. For
+ # now, we just ignore all but the last.
+ prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
+
+ ginfo = common.make_ir_generalizer(ti.args.version, ti.args.check_globals == "none")
+ global_vars_seen_dict = {}
+ builder = common.FunctionTestBuilder(
+ run_list=prefix_list,
+ flags=ti.args,
+ scrubber_args=[],
+ path=ti.path,
+ ginfo=ginfo,
+ )
+
+ tool_binary = ti.args.tool_binary
+ if not tool_binary:
+ tool_binary = tool_basename
+
+ for prefixes, tool_args, preprocess_cmd in prefix_list:
+ common.debug("Extracted tool cmd: " + tool_basename + " " + tool_args)
+ common.debug("Extracted FileCheck prefixes: " + str(prefixes))
+
+ raw_tool_output = common.invoke_tool(
+ tool_binary,
+ tool_args,
+ ti.path,
+ preprocess_cmd=preprocess_cmd,
+ verbose=ti.args.verbose,
+ )
+ builder.process_run_line(
+ common.OPT_FUNCTION_RE,
+ common.scrub_body,
+ raw_tool_output,
+ prefixes,
+ )
+ builder.processed_prefixes(prefixes)
+
+ prefix_set = set([prefix for prefixes, _, _ in prefix_list for prefix in prefixes])
+
+ if not ti.args.reset_variable_names:
+ original_check_lines = common.collect_original_check_lines(ti, prefix_set)
+ else:
+ original_check_lines = {}
+
+ func_dict = builder.finish_and_get_func_dict()
+ is_in_function = False
+ is_in_function_start = False
+ has_checked_pre_function_globals = False
+ common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
+ output_lines = []
+
+ include_generated_funcs = common.find_arg_in_test(
+ ti,
+ lambda args: ti.args.include_generated_funcs,
+ "--include-generated-funcs",
+ True,
+ )
+ generated_prefixes = []
+ if include_generated_funcs:
+ # Generate the appropriate checks for each function. We need to emit
+ # these in the order according to the generated output so that CHECK-LABEL
+ # works properly. func_order provides that.
+
+ # We can't predict where various passes might insert functions so we can't
+ # be sure the input function order is maintained. Therefore, first spit
+ # out all the source lines.
+ common.dump_input_lines(output_lines, ti, prefix_set, ";")
+
+ args = ti.args
+ if args.check_globals != "none":
+ generated_prefixes.extend(
+ common.add_global_checks(
+ builder.global_var_dict(),
+ ";",
+ prefix_list,
+ output_lines,
+ ginfo,
+ global_vars_seen_dict,
+ args.preserve_names,
+ True,
+ args.check_globals,
+ )
+ )
+
+ # Now generate all the checks.
+ generated_prefixes.extend(
+ common.add_checks_at_end(
+ output_lines,
+ prefix_list,
+ builder.func_order(),
+ ";",
+ lambda my_output_lines, prefixes, func: common.add_ir_checks(
+ my_output_lines,
+ ";",
+ prefixes,
+ func_dict,
+ func,
+ False,
+ args.function_signature,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
+ original_check_lines=original_check_lines.get(func, {}),
+ ),
+ )
+ )
+ else:
+ # "Normal" mode.
+ dropped_previous_line = False
+ for input_line_info in ti.iterlines(output_lines):
+ input_line = input_line_info.line
+ args = input_line_info.args
+ if is_in_function_start:
+ if input_line == "":
+ continue
+ if input_line.lstrip().startswith(";"):
+ m = common.CHECK_RE.match(input_line)
+ if not m or m.group(1) not in prefix_set:
+ output_lines.append(input_line)
+ continue
+
+ # Print out the various check lines here.
+ generated_prefixes.extend(
+ common.add_ir_checks(
+ output_lines,
+ ";",
+ prefix_list,
+ func_dict,
+ func_name,
+ args.preserve_names,
+ args.function_signature,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
+ original_check_lines=original_check_lines.get(func_name, {}),
+ )
+ )
+ is_in_function_start = False
+
+ m = common.IR_FUNCTION_RE.match(input_line)
+ if m and not has_checked_pre_function_globals:
+ if args.check_globals:
+ generated_prefixes.extend(
+ common.add_global_checks(
+ builder.global_var_dict(),
+ ";",
+ prefix_list,
+ output_lines,
+ ginfo,
+ global_vars_seen_dict,
+ args.preserve_names,
+ True,
+ args.check_globals,
+ )
+ )
+ has_checked_pre_function_globals = True
+
+ if common.should_add_line_to_output(
+ input_line,
+ prefix_set,
+ skip_global_checks=not is_in_function,
+ skip_same_checks=dropped_previous_line,
+ ):
+ # This input line of the function body will go as-is into the output.
+ # Except make leading whitespace uniform: 2 spaces.
+ input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r" ", input_line)
+ output_lines.append(input_line)
+ dropped_previous_line = False
+ if input_line.strip() == "}":
+ is_in_function = False
+ continue
+ else:
+ # If we are removing a check line, and the next line is CHECK-SAME, it MUST also be removed
+ dropped_previous_line = True
+
+ if is_in_function:
+ continue
+
+ m = common.IR_FUNCTION_RE.match(input_line)
+ if not m:
+ continue
+ func_name = m.group(1)
+ if args.function is not None and func_name != args.function:
+ # When filtering on a specific function, skip all others.
+ continue
+ is_in_function = is_in_function_start = True
+
+ if args.check_globals != "none":
+ generated_prefixes.extend(
+ common.add_global_checks(
+ builder.global_var_dict(),
+ ";",
+ prefix_list,
+ output_lines,
+ ginfo,
+ global_vars_seen_dict,
+ args.preserve_names,
+ False,
+ args.check_globals,
+ )
+ )
+ if ti.args.gen_unused_prefix_body:
+ output_lines.extend(
+ ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
+ )
+ common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+
+ with open(ti.path, "wb") as f:
+ f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
def main():
from argparse import RawTextHelpFormatter
@@ -102,271 +358,18 @@ def main():
common.error("Unexpected tool name: " + tool_basename)
sys.exit(1)
+ returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
- # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
- if ti.args.scrub_attributes:
- common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
- common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
- )
- else:
- common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
- common.SCRUB_TRAILING_WHITESPACE_RE
- )
-
- tool_basename = ti.args.tool
-
- prefix_list = []
- for l in ti.run_lines:
- if "|" not in l:
- common.warn("Skipping unparsable RUN line: " + l)
- continue
-
- cropped_content = l
- if "%if" in l:
- match = re.search(r"%{\s*(.*?)\s*%}", l)
- if match:
- cropped_content = match.group(1)
-
- commands = [cmd.strip() for cmd in cropped_content.split("|")]
- assert len(commands) >= 2
- preprocess_cmd = None
- if len(commands) > 2:
- preprocess_cmd = " | ".join(commands[:-2])
- tool_cmd = commands[-2]
- filecheck_cmd = commands[-1]
- common.verify_filecheck_prefixes(filecheck_cmd)
- if not tool_cmd.startswith(tool_basename + " "):
- common.warn("Skipping non-%s RUN line: %s" % (tool_basename, l))
- continue
-
- if not filecheck_cmd.startswith("FileCheck "):
- common.warn("Skipping non-FileChecked RUN line: " + l)
- continue
-
- tool_cmd_args = tool_cmd[len(tool_basename) :].strip()
- tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
- check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
- # FIXME: We should use multiple check prefixes to common check lines. For
- # now, we just ignore all but the last.
- prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
-
- ginfo = common.make_ir_generalizer(
- ti.args.version, ti.args.check_globals == "none"
- )
- global_vars_seen_dict = {}
- builder = common.FunctionTestBuilder(
- run_list=prefix_list,
- flags=ti.args,
- scrubber_args=[],
- path=ti.path,
- ginfo=ginfo,
- )
-
- tool_binary = ti.args.tool_binary
- if not tool_binary:
- tool_binary = tool_basename
-
- for prefixes, tool_args, preprocess_cmd in prefix_list:
- common.debug("Extracted tool cmd: " + tool_basename + " " + tool_args)
- common.debug("Extracted FileCheck prefixes: " + str(prefixes))
-
- raw_tool_output = common.invoke_tool(
- tool_binary,
- tool_args,
- ti.path,
- preprocess_cmd=preprocess_cmd,
- verbose=ti.args.verbose,
- )
- builder.process_run_line(
- common.OPT_FUNCTION_RE,
- common.scrub_body,
- raw_tool_output,
- prefixes,
- )
- builder.processed_prefixes(prefixes)
-
- prefix_set = set(
- [prefix for prefixes, _, _ in prefix_list for prefix in prefixes]
- )
-
- if not ti.args.reset_variable_names:
- original_check_lines = common.collect_original_check_lines(ti, prefix_set)
- else:
- original_check_lines = {}
-
- func_dict = builder.finish_and_get_func_dict()
- is_in_function = False
- is_in_function_start = False
- has_checked_pre_function_globals = False
- common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
- output_lines = []
-
- include_generated_funcs = common.find_arg_in_test(
- ti,
- lambda args: ti.args.include_generated_funcs,
- "--include-generated-funcs",
- True,
- )
- generated_prefixes = []
- if include_generated_funcs:
- # Generate the appropriate checks for each function. We need to emit
- # these in the order according to the generated output so that CHECK-LABEL
- # works properly. func_order provides that.
-
- # We can't predict where various passes might insert functions so we can't
- # be sure the input function order is maintained. Therefore, first spit
- # out all the source lines.
- common.dump_input_lines(output_lines, ti, prefix_set, ";")
-
- args = ti.args
- if args.check_globals != 'none':
- generated_prefixes.extend(
- common.add_global_checks(
- builder.global_var_dict(),
- ";",
- prefix_list,
- output_lines,
- ginfo,
- global_vars_seen_dict,
- args.preserve_names,
- True,
- args.check_globals,
- )
- )
-
- # Now generate all the checks.
- generated_prefixes.extend(
- common.add_checks_at_end(
- output_lines,
- prefix_list,
- builder.func_order(),
- ";",
- lambda my_output_lines, prefixes, func: common.add_ir_checks(
- my_output_lines,
- ";",
- prefixes,
- func_dict,
- func,
- False,
- args.function_signature,
- ginfo,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- original_check_lines=original_check_lines.get(func, {}),
- ),
- )
- )
- else:
- # "Normal" mode.
- dropped_previous_line = False
- for input_line_info in ti.iterlines(output_lines):
- input_line = input_line_info.line
- args = input_line_info.args
- if is_in_function_start:
- if input_line == "":
- continue
- if input_line.lstrip().startswith(";"):
- m = common.CHECK_RE.match(input_line)
- if not m or m.group(1) not in prefix_set:
- output_lines.append(input_line)
- continue
-
- # Print out the various check lines here.
- generated_prefixes.extend(
- common.add_ir_checks(
- output_lines,
- ";",
- prefix_list,
- func_dict,
- func_name,
- args.preserve_names,
- args.function_signature,
- ginfo,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- original_check_lines=original_check_lines.get(
- func_name, {}
- ),
- )
- )
- is_in_function_start = False
-
- m = common.IR_FUNCTION_RE.match(input_line)
- if m and not has_checked_pre_function_globals:
- if args.check_globals:
- generated_prefixes.extend(
- common.add_global_checks(
- builder.global_var_dict(),
- ";",
- prefix_list,
- output_lines,
- ginfo,
- global_vars_seen_dict,
- args.preserve_names,
- True,
- args.check_globals,
- )
- )
- has_checked_pre_function_globals = True
-
- if common.should_add_line_to_output(
- input_line,
- prefix_set,
- skip_global_checks=not is_in_function,
- skip_same_checks=dropped_previous_line,
- ):
- # This input line of the function body will go as-is into the output.
- # Except make leading whitespace uniform: 2 spaces.
- input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(
- r" ", input_line
- )
- output_lines.append(input_line)
- dropped_previous_line = False
- if input_line.strip() == "}":
- is_in_function = False
- continue
- else:
- # If we are removing a check line, and the next line is CHECK-SAME, it MUST also be removed
- dropped_previous_line = True
-
- if is_in_function:
- continue
-
- m = common.IR_FUNCTION_RE.match(input_line)
- if not m:
- continue
- func_name = m.group(1)
- if args.function is not None and func_name != args.function:
- # When filtering on a specific function, skip all others.
- continue
- is_in_function = is_in_function_start = True
-
- if args.check_globals != 'none':
- generated_prefixes.extend(
- common.add_global_checks(
- builder.global_var_dict(),
- ";",
- prefix_list,
- output_lines,
- ginfo,
- global_vars_seen_dict,
- args.preserve_names,
- False,
- args.check_globals,
- )
- )
- if ti.args.gen_unused_prefix_body:
- output_lines.extend(
- ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
- )
- common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
-
- with open(ti.path, "wb") as f:
- f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+ try:
+ update_test(ti)
+ except Exception as e:
+ stderr.write(f"Error: Failed to update test {ti.path}\n")
+ print_exc()
+ returncode = 1
+ return returncode
if __name__ == "__main__":
- main()
+ sys.exit(main())
>From e5260036afa813d0987b2b429b67aad781b9467a Mon Sep 17 00:00:00 2001
From: Matthias Braun <matze at braunis.de>
Date: Tue, 29 Apr 2025 11:41:18 -0700
Subject: [PATCH 2/2] Adapt code to being moved into a function
---
llvm/utils/update_mc_test_checks.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/llvm/utils/update_mc_test_checks.py b/llvm/utils/update_mc_test_checks.py
index a419f1ce8a73c..ab7fe19c432de 100755
--- a/llvm/utils/update_mc_test_checks.py
+++ b/llvm/utils/update_mc_test_checks.py
@@ -130,12 +130,10 @@ def update_test(ti: common.TestInfo):
mc_mode = "dasm"
if ti.args.sort:
- print("sorting with dasm(.txt) file is not supported!")
- return -1
-
+ raise Exception("sorting with dasm(.txt) file is not supported!")
else:
common.warn("Expected .s and .txt, Skipping file : ", ti.path)
- continue
+ return
triple_in_ir = None
for l in ti.input_lines:
More information about the llvm-commits
mailing list