/* Tree-based target query functions relating to optabs
   Copyright (C) 1987-2025 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "optabs.h"
#include "optabs-tree.h"
#include "stor-layout.h"
#include "internal-fn.h"

/* Return the optab used for computing the operation given by the tree code
   CODE and the type TYPE.  This function is not always usable (for example,
   it cannot give complete results for multiplication or division) but
   probably ought to be relied on more widely throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type,
                     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case MULT_HIGHPART_EXPR:
      return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;

    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      /* {s,u}mod_optab implements TRUNC_MOD_EXPR.  For scalar modes,
         expansion has code to adjust TRUNC_MOD_EXPR into the desired other
         modes, but for vector modes it does not.  The adjustment code
         should be instead emitted in tree-vect-patterns.cc.  */
      if (VECTOR_TYPE_P (type))
        return unknown_optab;
      /* FALLTHRU */
    case TRUNC_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
      /* {,u}{s,u}div_optab implements {TRUNC,EXACT}_DIV_EXPR or RDIV_EXPR.
         For scalar modes, expansion has code to adjust TRUNC_DIV_EXPR
         into the desired other modes, but for vector modes it does not.
         The adjustment code should be instead emitted in
         tree-vect-patterns.cc.  */
      if (VECTOR_TYPE_P (type))
        return unknown_optab;
      /* FALLTHRU */
    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return TYPE_SATURATING (type) ? unknown_optab : vashl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return vrotl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotl_optab;

    case RROTATE_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return vrotr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case POINTER_PLUS_EXPR:
      return add_optab;

    case POINTER_DIFF_EXPR:
      return sub_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      {
        if (subtype == optab_vector_mixed_sign)
          return usdot_prod_optab;

        return (TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab);
      }

    case SAD_EXPR:
      return TYPE_UNSIGNED (type) ? usad_optab : ssad_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmadd_widen_optab : umadd_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmsub_widen_optab : umsub_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmsub_widen_optab : smsub_widen_optab));

    case VEC_WIDEN_MULT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab);

    case VEC_WIDEN_MULT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab);

    case VEC_WIDEN_MULT_EVEN_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_even_optab : vec_widen_smult_even_optab);

    case VEC_WIDEN_MULT_ODD_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab);

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab);

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab);

    case VEC_UNPACK_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_hi_optab : vec_unpacks_hi_optab);

    case VEC_UNPACK_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_lo_optab : vec_unpacks_lo_optab);

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab);

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab);

    case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpack_ufix_trunc_hi_optab
              : vec_unpack_sfix_trunc_hi_optab);

    case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpack_ufix_trunc_lo_optab
              : vec_unpack_sfix_trunc_lo_optab);

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab);

    case VEC_PACK_FLOAT_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_packu_float_optab : vec_packs_float_optab);

    case VEC_DUPLICATE_EXPR:
      return vec_duplicate_optab;

    case VEC_SERIES_EXPR:
      return vec_series_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case ABSU_EXPR:
      return abs_optab;
    default:
      return unknown_optab;
    }
}

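/* Usage sketch: a hypothetical caller, assuming VECTYPE is some vector
   type node, would pair this query with an optab_handler check:

     optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
     bool ok = (op != unknown_optab
                && optab_handler (op, TYPE_MODE (vectype))
                   != CODE_FOR_nothing);

   OK is true iff a PLUS_EXPR on VECTYPE can be expanded directly.  */
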
/* Check whether an operation represented by CODE is a 'half' widening
   operation in which the input vector type has half the number of bits of
   the output vector type e.g. V8QI->V8HI.

   This is handled by widening the inputs using NOP_EXPRs then using a
   non-widening stmt e.g. MINUS_EXPR.  RTL fusing converts these to the
   widening hardware instructions if supported.

   The more typical case (handled in supportable_widening_operation) is where
   the input vector type has the same number of bits as the output vector
   type.  In this case half the elements of the input vectors must be
   processed at a time into respective vector outputs with elements twice as
   wide i.e. a 'hi'/'lo' pair using codes such as VEC_WIDEN_MINUS_HI/LO.

   Supported widening operations:
    WIDEN_MULT_EXPR
    WIDEN_LSHIFT_EXPR

   Output:
   - CODE1 - The non-widened code, which will be used after the inputs are
     converted to the wide type.  */
bool
supportable_half_widening_operation (enum tree_code code, tree vectype_out,
                                     tree vectype_in, enum tree_code *code1)
{
  machine_mode m1, m2;
  enum tree_code dummy_code;
  optab op;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in),
                TYPE_VECTOR_SUBPARTS (vectype_out)))
    return false;

  switch (code)
    {
    case WIDEN_LSHIFT_EXPR:
      *code1 = LSHIFT_EXPR;
      break;
    case WIDEN_MULT_EXPR:
      *code1 = MULT_EXPR;
      break;
    default:
      return false;
    }

  if (!supportable_convert_operation (NOP_EXPR, vectype_out, vectype_in,
                                      &dummy_code))
    return false;

  op = optab_for_tree_code (*code1, vectype_out, optab_vector);
  return (optab_handler (op, TYPE_MODE (vectype_out)) != CODE_FOR_nothing);
}

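/* Usage sketch: a hypothetical caller, assuming V8HI_TYPE and V8QI_TYPE
   are vector type nodes with the same lane count:

     enum tree_code code1;
     bool ok = supportable_half_widening_operation (WIDEN_MULT_EXPR,
                                                    v8hi_type, v8qi_type,
                                                    &code1);

   On success CODE1 is MULT_EXPR: the inputs are first widened from V8QI
   to V8HI with NOP_EXPRs, then multiplied with a plain MULT_EXPR.  */
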
/* Function supportable_convert_operation

   Check whether an operation represented by the code CODE is a
   convert operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
   This function checks if these operations are supported
   by the target platform directly (via vector tree-codes).

   Output:
   - CODE1 is the code of the vector operation to be used when
     vectorizing the operation, if available.  */

bool
supportable_convert_operation (enum tree_code code,
                               tree vectype_out, tree vectype_in,
                               enum tree_code *code1)
{
  machine_mode m1, m2;
  bool truncp;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  /* First check if we can do the conversion directly.  */
  if ((code == FIX_TRUNC_EXPR
       && can_fix_p (m1, m2, TYPE_UNSIGNED (vectype_out), &truncp)
          != CODE_FOR_nothing)
      || (code == FLOAT_EXPR
          && can_float_p (m1, m2, TYPE_UNSIGNED (vectype_in))
             != CODE_FOR_nothing))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) > GET_MODE_UNIT_PRECISION (m2)
      && can_extend_p (m1, m2, TYPE_UNSIGNED (vectype_in)))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) < GET_MODE_UNIT_PRECISION (m2)
      && convert_optab_handler (trunc_optab, m1, m2) != CODE_FOR_nothing)
    {
      *code1 = code;
      return true;
    }

  return false;
}

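/* Usage sketch: a hypothetical caller checking whether a V4SI->V4SF
   int-to-float conversion has direct vector support, assuming V4SF_TYPE
   and V4SI_TYPE are vector type nodes:

     enum tree_code code1;
     bool ok = supportable_convert_operation (FLOAT_EXPR, v4sf_type,
                                              v4si_type, &code1);

   On success CODE1 is simply FLOAT_EXPR, confirming the conversion can
   be emitted as a single vector tree code.  */
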
/* Return true iff vec_cmp_optab/vec_cmpu_optab can handle a vector comparison
   for code CODE, comparing operands of type VALUE_TYPE and producing a result
   of type MASK_TYPE.  */

static bool
vec_cmp_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (value_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vec_cmp_compare_p (rcode, TYPE_MODE (value_type),
                                TYPE_MODE (mask_type));
}

/* Return true iff vec_cmpeq_optab can handle a vector comparison for code
   CODE, comparing operands of type VALUE_TYPE and producing a result of type
   MASK_TYPE.  */

static bool
vec_cmp_eq_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vec_cmp_eq_icode (TYPE_MODE (value_type), TYPE_MODE (mask_type))
         != CODE_FOR_nothing;
}

/* Return TRUE if an appropriate vector insn is available for a vector
   comparison expr with vector type VALUE_TYPE and a resulting mask of
   type MASK_TYPE.  */

bool
expand_vec_cmp_expr_p (tree value_type, tree mask_type, enum tree_code code)
{
  return vec_cmp_icode_p (value_type, mask_type, code)
         || vec_cmp_eq_icode_p (value_type, mask_type, code);
}

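/* Usage sketch: assuming DATA_TYPE is a vector-of-ints type and MASK_TYPE
   the corresponding vector boolean type, a hypothetical caller would test:

     bool ok = expand_vec_cmp_expr_p (data_type, mask_type, LT_EXPR);

   This succeeds if either the general vec_cmp{,u} pattern or, for
   EQ_EXPR/NE_EXPR only, the vec_cmpeq pattern is available.  */
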
/* Return TRUE iff appropriate vector insns are available for a vector
   cond expr with vector type VALUE_TYPE and a comparison with operand
   vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
{
  if (VECTOR_BOOLEAN_TYPE_P (cmp_op_type)
      && get_vcond_mask_icode (TYPE_MODE (value_type),
                               TYPE_MODE (cmp_op_type)) != CODE_FOR_nothing)
    return true;

  return false;
}

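/* Usage sketch: for a VEC_COND_EXPR whose selector is already a vector
   boolean (mask), a hypothetical caller would check, assuming VALUE_TYPE
   and MASK_TYPE are the value and mask vector types:

     bool ok = expand_vec_cond_expr_p (value_type, mask_type);

   which reduces to probing the vcond_mask pattern for the two modes.  */
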
/* Use the current target and options to initialize
   TREE_OPTIMIZATION_OPTABS (OPTNODE).  */

void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
  struct target_optabs *tmp_optabs = (struct target_optabs *)
    TREE_OPTIMIZATION_OPTABS (optnode);
  if (tmp_optabs)
    memset (tmp_optabs, 0, sizeof (struct target_optabs));
  else
    tmp_optabs = ggc_cleared_alloc<target_optabs> ();

  /* Generate a new set of optabs into tmp_optabs.  */
  init_all_optabs (tmp_optabs);

  /* If the optabs changed, record it.  */
  if (memcmp (tmp_optabs, this_target_optabs, sizeof (struct target_optabs)))
    TREE_OPTIMIZATION_OPTABS (optnode) = tmp_optabs;
  else
    {
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (tmp_optabs);
    }
}

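/* Usage sketch: this is typically reached when an optimization node is
   built, e.g. for a function using __attribute__((optimize(...))).  A
   hedged illustration, assuming OPTS and OPTS_SET are the gcc_options
   structures in hand and build_optimization_node takes both:

     tree optnode = build_optimization_node (opts, opts_set);
     init_tree_optimization_optabs (optnode);

   Afterwards TREE_OPTIMIZATION_OPTABS (optnode) is either NULL (optabs
   identical to the current target's) or a private target_optabs copy.  */
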
/* Return TRUE if the target has support for the operation CODE on
   operands of type TYPE.  For vector shifts and rotates, OT_SUBTYPE
   distinguishes a shift by a scalar amount (optab_scalar) from a shift
   by a vector of per-lane amounts (optab_vector).  */

bool
target_supports_op_p (tree type, enum tree_code code,
                      enum optab_subtype ot_subtype)
{
  optab ot = optab_for_tree_code (code, type, ot_subtype);
  return ot != unknown_optab && can_implement_p (ot, TYPE_MODE (type));
}

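/* Usage sketch: a hypothetical check that VECTYPE supports a right shift
   where every lane is shifted by the same scalar amount:

     bool ok = target_supports_op_p (vectype, RSHIFT_EXPR, optab_scalar);

   Passing optab_vector instead asks for the per-lane-amount variant.  */
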
/* Return true if the target has support for masked load/store.
   We can support masked load/store by either mask{load,store}
   or mask_len_{load,store}.
   This helper function checks whether the target supports masked
   load/store and returns the corresponding IFN in the last argument
   (IFN_MASK_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}).
   If there is support and ELSVALS is nonzero, store the possible else
   values in the vector it points to.  */

bool
target_supports_mask_load_store_p (machine_mode mode, machine_mode mask_mode,
                                   bool is_load, internal_fn *ifn,
                                   vec<int> *elsvals)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  optab len_op = is_load ? mask_len_load_optab : mask_len_store_optab;
  enum insn_code icode;
  if ((icode = convert_optab_handler (op, mode, mask_mode))
      != CODE_FOR_nothing)
    {
      if (ifn)
        *ifn = is_load ? IFN_MASK_LOAD : IFN_MASK_STORE;
      if (elsvals && is_load)
        get_supported_else_vals (icode,
                                 internal_fn_else_index (IFN_MASK_LOAD),
                                 *elsvals);
      return true;
    }
  else if ((icode = convert_optab_handler (len_op, mode, mask_mode))
           != CODE_FOR_nothing)
    {
      if (ifn)
        *ifn = is_load ? IFN_MASK_LEN_LOAD : IFN_MASK_LEN_STORE;
      if (elsvals && is_load)
        get_supported_else_vals (icode,
                                 internal_fn_else_index (IFN_MASK_LEN_LOAD),
                                 *elsvals);
      return true;
    }
  return false;
}

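/* Usage sketch: assuming VMODE is a vector mode and MASK_MODE its mask
   mode, a hypothetical caller distinguishing the two flavors:

     internal_fn ifn;
     bool ok = target_supports_mask_load_store_p (vmode, mask_mode,
                                                  true, &ifn, NULL);

   On success IFN is IFN_MASK_LOAD if the plain maskload pattern exists,
   otherwise IFN_MASK_LEN_LOAD for the length-and-mask variant.  */
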
/* Return true if the target supports vector masked load/store for MODE.
   An additional output in the last argument is the IFN pointer.
   We set IFN to MASK_{LOAD,STORE} or MASK_LEN_{LOAD,STORE} according to
   which optab is supported in the target.
   If there is support and ELSVALS is nonzero, store the possible else
   values in the vector it points to.  */

bool
can_vec_mask_load_store_p (machine_mode mode,
                           machine_mode mask_mode,
                           bool is_load,
                           internal_fn *ifn,
                           vec<int> *elsvals)
{
  machine_mode vmode;

  /* If MODE is a vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return target_supports_mask_load_store_p (mode, mask_mode, is_load, ifn,
                                              elsvals);

  /* Otherwise, return true if there is some vector mode with
     the mask load/store supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  vmode = targetm.vectorize.preferred_simd_mode (smode);
  if (VECTOR_MODE_P (vmode)
      && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
      && target_supports_mask_load_store_p (vmode, mask_mode, is_load, ifn,
                                            elsvals))
    return true;

  auto_vector_modes vector_modes;
  targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
  for (machine_mode base_mode : vector_modes)
    if (related_vector_mode (base_mode, smode).exists (&vmode)
        && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
        && target_supports_mask_load_store_p (vmode, mask_mode, is_load, ifn,
                                              elsvals))
      return true;
  return false;
}

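/* Usage sketch: MODE may also be a scalar mode here, in which case the
   function searches for any suitable vector mode.  A hypothetical query
   for a masked load of SImode elements:

     internal_fn ifn;
     bool ok = can_vec_mask_load_store_p (SImode, VOIDmode, true, &ifn,
                                          NULL);

   MASK_MODE is ignored on the scalar-mode path, which derives it from
   each candidate vector mode via targetm.vectorize.get_mask_mode.  */
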
/* Return true if the target has support for len load/store.
   We can support len load/store by either len_{load,store}
   or mask_len_{load,store}.
   This helper function checks whether the target supports len
   load/store and returns the corresponding IFN in the last argument
   (IFN_LEN_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}).
   If there is support and ELSVALS is nonzero, store the possible
   else values in the vector it points to.  */

static bool
target_supports_len_load_store_p (machine_mode mode, bool is_load,
                                  internal_fn *ifn, vec<int> *elsvals)
{
  optab op = is_load ? len_load_optab : len_store_optab;
  optab masked_op = is_load ? mask_len_load_optab : mask_len_store_optab;

  if (direct_optab_handler (op, mode))
    {
      if (ifn)
        *ifn = is_load ? IFN_LEN_LOAD : IFN_LEN_STORE;
      return true;
    }
  machine_mode mask_mode;
  enum insn_code icode;
  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
      && ((icode = convert_optab_handler (masked_op, mode, mask_mode))
          != CODE_FOR_nothing))
    {
      if (ifn)
        *ifn = is_load ? IFN_MASK_LEN_LOAD : IFN_MASK_LEN_STORE;
      if (elsvals && is_load)
        get_supported_else_vals (icode,
                                 internal_fn_else_index (IFN_MASK_LEN_LOAD),
                                 *elsvals);
      return true;
    }
  return false;
}

/* If the target supports vector load/store with length for vector mode MODE,
   return the corresponding vector mode, otherwise return opt_machine_mode ().
   There are two flavors of vector load/store with length: one measures the
   length in bytes, the other measures it in lanes.
   As the len_{load,store} optabs point out, for the flavor with bytes, we use
   VnQI to wrap the other supportable same-size vector modes.
   An additional output in the last argument is the IFN pointer.
   We set IFN to LEN_{LOAD,STORE} or MASK_LEN_{LOAD,STORE} according to
   which optab is supported in the target.
   If there is support and ELSVALS is nonzero, store the possible else values
   in the vector it points to.  */

opt_machine_mode
get_len_load_store_mode (machine_mode mode, bool is_load, internal_fn *ifn,
                         vec<int> *elsvals)
{
  gcc_assert (VECTOR_MODE_P (mode));

  /* Check if length in lanes is supported for this mode directly.  */
  if (target_supports_len_load_store_p (mode, is_load, ifn, elsvals))
    return mode;

  /* Check if length in bytes is supported for the same vector size VnQI.  */
  machine_mode vmode;
  poly_uint64 nunits = GET_MODE_SIZE (mode);
  if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
      && target_supports_len_load_store_p (vmode, is_load, ifn, elsvals))
    return vmode;

  return opt_machine_mode ();
}

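/* Usage sketch: a hypothetical query for a length-controlled load of a
   V4SI vector, where VMODE receives the mode to actually operate on:

     internal_fn ifn;
     machine_mode vmode;
     bool ok = get_len_load_store_mode (V4SImode, true, &ifn, NULL)
                 .exists (&vmode);

   On a target measuring length in lanes VMODE stays V4SImode; on a
   target measuring length in bytes the same-size V16QImode is returned
   and the elements are reinterpreted as QImode lanes.  */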