/* { dg-require-effective-target vect_float } */
/* { dg-additional-options "-fdump-tree-optimized-details-blocks" } */

__attribute__ ((noinline))
void bar (float *pd, float *pa, float *pb, float *pc)

  for (i = 0; i < N; i++)
    if (pa[i] != (pb[i] * pc[i]))
      abort ();

__attribute__ ((noinline)) int
main1 (int n, float * __restrict__ pd, float * __restrict__ pa, float * __restrict__ pb, float * __restrict__ pc)

  for (i = 0; i < n; i++)
    pa[i] = pb[i] * pc[i];

  float a[N] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__)));
  float d[N+1] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__)));
  float b[N] = {0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57};
  float c[N] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19};

  main1 (N,&d[1],a,b,c);
  main1 (N-2,&d[1],a,b,c);

/* For targets that support unaligned loads we version for the two unaligned
   stores and generate misaligned accesses for the loads.  For targets that
   don't support unaligned loads we version for all four accesses.  */
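
/* As a rough conceptual sketch (not the vectorizer's actual output), the
   versioning transform guards the vectorized loop with a runtime alignment
   check on the pointer that could not be proven aligned, e.g. for the store
   through pd in main1:

     if (((__UINTPTR_TYPE__) pd & (VECTOR_ALIGNMENT - 1)) == 0)
       // vectorized copy of the loop
     else
       // original scalar loop

   VECTOR_ALIGNMENT is only a placeholder here for the target's preferred
   vector alignment.  */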

/* { dg-final { scan-tree-dump-times "Vectorizing an unaligned access" 2 "vect" { xfail { vect_no_align || vect_element_align } } } } */
/* { dg-final { scan-tree-dump-times "Alignment of access forced using versioning" 2 "vect" { xfail { vect_no_align || vect_element_align } } } } */
/* { dg-final { scan-tree-dump-times "Vectorizing an unaligned access" 0 "vect" { target { vect_no_align && { ! vect_hw_misalign } } } } } */
/* { dg-final { scan-tree-dump-times "Alignment of access forced using versioning" 4 "vect" { target { vect_no_align && { ! vect_hw_misalign } } } } } */
/* { dg-final { scan-tree-dump-not "Invalid sum" "optimized" } } */