From 0feece18e6993d02f24a9381ddb5420bb4509554 Mon Sep 17 00:00:00 2001 From: Jakub Jelinek Date: Sun, 21 May 2023 13:36:56 +0200 Subject: [PATCH] match.pd: Ensure (op CONSTANT_CLASS_P CONSTANT_CLASS_P) is simplified [PR109505] On the following testcase we hang, because POLY_INT_CST is CONSTANT_CLASS_P, but BIT_AND_EXPR with it and INTEGER_CST doesn't simplify and the (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) simplification actually relies on the (CST1 & CST2) simplification, otherwise it is a deoptimization, trading 2 ops for 3 and furthermore running into /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both operands are another bit-wise operation with a common input. If so, distribute the bit operations to save an operation and possibly two if constants are involved. For example, convert (A | B) & (A | C) into A | (B & C) Further simplification will occur if B and C are constants. */ simplification which simplifies that (x & CST2) | (CST1 & CST2) back to CST2 & (x | CST1). I went through all other places I could find where we have a simplification with 2 CONSTANT_CLASS_P operands and perform some operation on those two, while the other spots aren't that severe (just trade 2 operations for another 2 if the two constants don't simplify, rather than as in the above case trading 2 ops for 3), I still think all those spots really intend to optimize only if the 2 constants simplify. So, the following patch adds to those a ! modifier to ensure that, even at GENERIC that modifier means !EXPR_P which is exactly what we want IMHO. 2023-05-21 Jakub Jelinek PR tree-optimization/109505 * match.pd ((x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2), Combine successive equal operations with constants, (A +- CST1) +- CST2 -> A + CST3, (CST1 - A) +- CST2 -> CST3 - A, CST1 - (CST2 - A) -> CST3 + A): Use ! on ops with 2 CONSTANT_CLASS_P operands. * gcc.target/aarch64/sve/pr109505.c: New test. 
(cherry picked from commit f211757f6fa9515e3fd1a4f66f1a8b48e500c9de) --- gcc/match.pd | 20 +++++++++---------- .../gcc.target/aarch64/sve/pr109505.c | 12 +++++++++++ 2 files changed, 22 insertions(+), 10 deletions(-) create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/pr109505.c diff --git a/gcc/match.pd b/gcc/match.pd index 1410ecdf73d..737a45ff077 100644 --- a/gcc/match.pd +++ b/gcc/match.pd @@ -1853,7 +1853,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */ (simplify (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) - (bit_ior (bit_and @0 @2) (bit_and @1 @2))) + (bit_ior (bit_and @0 @2) (bit_and! @1 @2))) /* Combine successive equal operations with constants. */ (for bitop (bit_and bit_ior bit_xor) @@ -1862,7 +1862,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) (if (!CONSTANT_CLASS_P (@0)) /* This is the canonical form regardless of whether (bitop @1 @2) can be folded to a constant. */ - (bitop @0 (bitop @1 @2)) + (bitop @0 (bitop! @1 @2)) /* In this case we have three constants and (bitop @0 @1) doesn't fold to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if the values involved are such that the operation can't be decided at @@ -2923,13 +2923,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) forever if something doesn't simplify into a constant. */ (if (!CONSTANT_CLASS_P (@0)) (if (outer_op == PLUS_EXPR) - (plus (view_convert @0) (inner_op @2 (view_convert @1))) - (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))) + (plus (view_convert @0) (inner_op! @2 (view_convert @1))) + (minus (view_convert @0) (neg_inner_op! @2 (view_convert @1))))) (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) (if (outer_op == PLUS_EXPR) - (view_convert (plus @0 (inner_op (view_convert @2) @1))) - (view_convert (minus @0 (neg_inner_op (view_convert @2) @1)))) + (view_convert (plus @0 (inner_op! (view_convert @2) @1))) + (view_convert (minus @0 (neg_inner_op! 
(view_convert @2) @1)))) /* If the constant operation overflows we cannot do the transform directly as we would introduce undefined overflow, for example with (a - 1) + INT_MIN. */ @@ -2960,10 +2960,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse forever if something doesn't simplify into a constant. */ (if (!CONSTANT_CLASS_P (@0)) - (minus (outer_op (view_convert @1) @2) (view_convert @0))) + (minus (outer_op! (view_convert @1) @2) (view_convert @0))) (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) - (view_convert (minus (outer_op @1 (view_convert @2)) @0)) + (view_convert (minus (outer_op! @1 (view_convert @2)) @0)) (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type)) (with { tree cst = const_binop (outer_op, type, @1, @2); } (if (cst && !TREE_OVERFLOW (cst)) @@ -2979,10 +2979,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse forever if something doesn't simplify into a constant. */ (if (!CONSTANT_CLASS_P (@0)) - (plus (view_convert @0) (minus @1 (view_convert @2)))) + (plus (view_convert @0) (minus! @1 (view_convert @2)))) (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) - (view_convert (plus @0 (minus (view_convert @1) @2))) + (view_convert (plus @0 (minus! 
(view_convert @1) @2))) (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type)) (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); } (if (cst && !TREE_OVERFLOW (cst)) diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c b/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c new file mode 100644 index 00000000000..b975ae75ae6 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c @@ -0,0 +1,12 @@ +/* PR tree-optimization/109505 */ +/* { dg-do compile } */ +/* { dg-options "-O2 -march=armv8.2-a+sve" } */ + +#pragma GCC aarch64 "arm_sve.h" + +unsigned long +foo (unsigned long x) +{ + unsigned long y = svcntb (); + return (x | 15) & y; +}