Vect: Optimize truncation for .SAT_SUB operands

To get better vectorized code for .SAT_SUB, we would like to avoid the
truncation operation on the assignment.  For example, take the gimple below.

unsigned int _1;
unsigned int _2;
unsigned short int _4;
_9 = (unsigned short int).SAT_SUB (_1, _2);

If _1 is known to be in the range of unsigned short int, for example
via a def similar to:

_1 = (unsigned int) _4;

Then we can distribute the truncation operation into:

_3 = (unsigned short int) MIN (65535, _2); // aka _3 = .SAT_TRUNC (_2);
_9 = .SAT_SUB (_4, _3);

Then we can generate better vectorized code and avoid the unnecessary
narrowing stmt during vectorization, with the below stmt(s).

_3 = .SAT_TRUNC(_2); // SI => HI
_9 = .SAT_SUB (_4, _3);
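
To see why the distribution is safe, below is a minimal scalar model of
the two internal functions (the helper names are illustrative, not GCC
APIs): when the first operand fits in unsigned short int, both forms
agree for every value of the second operand.

#include <assert.h>
#include <stdint.h>

/* Scalar models of .SAT_SUB (u32/u16) and .SAT_TRUNC (SI => HI).  */
static uint32_t sat_sub_u32 (uint32_t a, uint32_t b) { return a >= b ? a - b : 0; }
static uint16_t sat_sub_u16 (uint16_t a, uint16_t b) { return a >= b ? a - b : 0; }
static uint16_t sat_trunc_u16 (uint32_t x) { return x > 65535 ? 65535 : x; }

int main (void)
{
  for (uint32_t a = 0; a <= 65535; a += 257)     /* _1 fits in 16 bits.  */
    for (uint32_t b = 0; b <= 200000; b += 3855) /* _2 is a full uint32_t.  */
      assert ((uint16_t) sat_sub_u32 (a, b)
              == sat_sub_u16 ((uint16_t) a, sat_trunc_u16 (b)));
  return 0;
}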

Let's take RISC-V vector as an example to show the change.  For the
sample code below:

#include <stdint.h>

__attribute__((noinline))
void test (uint16_t *x, unsigned b, unsigned n)
{
  unsigned a = 0;
  uint16_t *p = x;

  do {
    a = *--p;
    /* Unsigned saturating subtraction: a - b, clamped at 0.  */
    *p = (uint16_t)(a >= b ? a - b : 0);
  } while (--n);
}
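
Note that the loop walks p downward from x, so x must point one past the
end of an n-element buffer.  A hypothetical driver (not part of the
patch) to exercise it:

#include <stdio.h>

int main (void)
{
  uint16_t buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

  test (buf + 8, 3, 8); /* One past the end; subtract 3, clamped at 0.  */

  for (int i = 0; i < 8; i++)
    printf ("%u ", (unsigned) buf[i]); /* Expect: 0 0 0 0 1 2 3 4 */

  return 0;
}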

Before this patch:
  ...
  .L3:
  vle16.v	v1,0(a3)
  vrsub.vx	v5,v2,t1
  mv	t3,a4
  addw	a4,a4,t5
  vrgather.vv	v3,v1,v5
  vsetvli	zero,zero,e32,m1,ta,ma
  vzext.vf2	v1,v3
  vssubu.vx	v1,v1,a1
  vsetvli	zero,zero,e16,mf2,ta,ma
  vncvt.x.x.w	v1,v1
  vrgather.vv	v3,v1,v5
  vse16.v	v3,0(a3)
  sub	a3,a3,t4
  bgtu	t6,a4,.L3
  ...

After this patch, the widen/subtract/narrow sequence (the vsetvli
toggles, vzext.vf2 and vncvt.x.x.w) is gone and vssubu operates
directly on the e16 elements:
test:
  ...
  .L3:
  vle16.v     v3,0(a3)
  vrsub.vx    v5,v2,a6
  mv          a7,a4
  addw        a4,a4,t3
  vrgather.vv v1,v3,v5
  vssubu.vv   v1,v1,v6
  vrgather.vv v3,v1,v5
  vse16.v     v3,0(a3)
  sub     a3,a3,t1
  bgtu    t4,a4,.L3
  ...

The following test suites pass for this patch:
1. The rv64gcv full regression tests.
2. The rv64gcv build with glibc.
3. The x86 bootstrap tests.
4. The x86 full regression tests.

gcc/ChangeLog:

	* tree-vect-patterns.cc (vect_recog_sat_sub_pattern_transform):
	New function to perform the truncation distribution.
	(vect_recog_sat_sub_pattern): Perform the above optimization
	before generating the .SAT_SUB call.

Signed-off-by: Pan Li <pan2.li@intel.com>
@@ -4566,6 +4566,70 @@ vect_recog_sat_add_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
  return NULL;
}

/*
 * Try to transform the truncation for the .SAT_SUB pattern, which mostly
 * occurs in the zip benchmark.  Aka:
 *
 * unsigned int _1;
 * unsigned int _2;
 * unsigned short int _4;
 * _9 = (unsigned short int).SAT_SUB (_1, _2);
 *
 * If _1 is known to be in the range of unsigned short int, for example
 * because there is a def _1 = (unsigned int) _4, then we can distribute
 * the truncation to:
 *
 * _3 = (unsigned short int) MIN (65535, _2); // aka _3 = .SAT_TRUNC (_2);
 * _9 = .SAT_SUB (_4, _3);
 *
 * Then we can generate better vectorized code and avoid the unnecessary
 * narrowing stmt during vectorization, with the below stmt(s).
 *
 * _3 = .SAT_TRUNC (_2); // SI => HI
 * _9 = .SAT_SUB (_4, _3);
 */
static void
vect_recog_sat_sub_pattern_transform (vec_info *vinfo,
				      stmt_vec_info stmt_vinfo,
				      tree lhs, tree *ops)
{
  tree otype = TREE_TYPE (lhs);
  tree itype = TREE_TYPE (ops[0]);
  unsigned itype_prec = TYPE_PRECISION (itype);
  unsigned otype_prec = TYPE_PRECISION (otype);

  if (types_compatible_p (otype, itype) || otype_prec >= itype_prec)
    return;

  tree v_otype = get_vectype_for_scalar_type (vinfo, otype);
  tree v_itype = get_vectype_for_scalar_type (vinfo, itype);
  tree_pair v_pair = tree_pair (v_otype, v_itype);

  if (v_otype == NULL_TREE || v_itype == NULL_TREE
      || !direct_internal_fn_supported_p (IFN_SAT_TRUNC, v_pair,
					  OPTIMIZE_FOR_BOTH))
    return;

  /* 1. Find the _4 and update ops[0] as in the above example.  */
  vect_unpromoted_value unprom;
  tree tmp = vect_look_through_possible_promotion (vinfo, ops[0], &unprom);

  if (tmp == NULL_TREE || TYPE_PRECISION (unprom.type) != otype_prec)
    return;

  ops[0] = tmp;

  /* 2. Generate _3 = .SAT_TRUNC (_2) and update ops[1] as in the above
     example.  */
  tree trunc_lhs_ssa = vect_recog_temp_ssa_var (otype, NULL);
  gcall *call = gimple_build_call_internal (IFN_SAT_TRUNC, 1, ops[1]);

  gimple_call_set_lhs (call, trunc_lhs_ssa);
  gimple_call_set_nothrow (call, /* nothrow_p */ false);
  append_pattern_def_seq (vinfo, stmt_vinfo, call, v_otype);

  ops[1] = trunc_lhs_ssa;
}
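
/* For the running example, the statements handed to the vectorizer after
   this transform are, as a sketch (SSA names illustrative; the .SAT_TRUNC
   call is carried in the pattern def-seq via append_pattern_def_seq):

     patt_3 = .SAT_TRUNC (_2);  // SI => HI
     patt_9 = .SAT_SUB (_4, patt_3);  */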

/*
 * Try to detect saturation sub pattern (SAT_SUB), aka below gimple:
 * _7 = _1 >= _2;
@@ -4591,6 +4655,7 @@ vect_recog_sat_sub_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
  if (gimple_unsigned_integer_sat_sub (lhs, ops, NULL))
    {
      vect_recog_sat_sub_pattern_transform (vinfo, stmt_vinfo, lhs, ops);
      gimple *stmt = vect_recog_build_binary_gimple_stmt (vinfo, stmt_vinfo,
							  IFN_SAT_SUB, type_out,
							  lhs, ops[0], ops[1]);