@@ -369,3 +369,189 @@ loop.latch:
exit:
ret void
}
+
+ ; %a is known dereferenceable via assume for the whole loop, alignment is known via argument attribute.
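+ ; The assumption covers %mul = %n * 4 bytes, i.e. every i32 the loop may touch, and the
+ ; align 4 argument attribute supplies the alignment, so the vector body below is expected
+ ; to load from %a unconditionally (a plain <2 x i32> load) instead of emitting predicated loads.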
+ define void @deref_assumption_in_preheader_non_constant_trip_count_access_i32_align_attribute(ptr noalias noundef align 4 %a, ptr noalias %b, ptr noalias %c, i64 %n) nofree nosync {
375
+ ; CHECK-LABEL: define void @deref_assumption_in_preheader_non_constant_trip_count_access_i32_align_attribute(
376
+ ; CHECK-SAME: ptr noalias noundef align 4 [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
377
+ ; CHECK-NEXT: [[ENTRY:.*]]:
378
+ ; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i64 [[N]], 4
379
+ ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 [[MUL]]) ]
380
+ ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2
381
+ ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
382
+ ; CHECK: [[VECTOR_PH]]:
383
+ ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 2
384
+ ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
385
+ ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
386
+ ; CHECK: [[VECTOR_BODY]]:
387
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
388
+ ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
389
+ ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
390
+ ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
391
+ ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4
392
+ ; CHECK-NEXT: [[TMP3:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer
393
+ ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP0]], i32 0
394
+ ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i32>, ptr [[TMP4]], align 4
395
+ ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]]
396
+ ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
397
+ ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i32 0
398
+ ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP6]], align 1
399
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
400
+ ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
401
+ ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
402
+ ; CHECK: [[MIDDLE_BLOCK]]:
403
+ ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
404
+ ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
405
+ ; CHECK: [[SCALAR_PH]]:
406
+ ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
407
+ ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
408
+ ; CHECK: [[LOOP_HEADER]]:
409
+ ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
410
+ ; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
411
+ ; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
412
+ ; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4
413
+ ; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0
414
+ ; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]]
415
+ ; CHECK: [[LOOP_THEN]]:
416
+ ; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4
417
+ ; CHECK-NEXT: br label %[[LOOP_LATCH]]
418
+ ; CHECK: [[LOOP_LATCH]]:
419
+ ; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ]
420
+ ; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
421
+ ; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1
422
+ ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
423
+ ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
424
+ ; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]]
425
+ ; CHECK: [[EXIT]]:
426
+ ; CHECK-NEXT: ret void
427
+ ;
+ entry:
+ %mul = mul nsw nuw i64 %n, 4
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 %mul) ]
+ br label %loop.header
+
+ loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 4
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+ loop.then:
+ %l.a = load i32, ptr %gep.a, align 4
+ br label %loop.latch
+
+ loop.latch:
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %exit, label %loop.header
+
+ exit:
+ ret void
+ }
+
+ ; Alignment via argument attribute is too small (1 but needs 4).
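+ ; The dereferenceable assumption still covers the whole range, but align 1 is below the
+ ; natural alignment of the i32 accesses, so the loads from %a in the vector body below are
+ ; expected to stay predicated (PRED_LOAD_IF/PRED_LOAD_CONTINUE) rather than become one wide load.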
+ define void @deref_assumption_in_preheader_non_constant_trip_count_access_i32_align_attribute_too_small(ptr noalias noundef align 1 %a, ptr noalias %b, ptr noalias %c, i64 %n) nofree nosync {
+ ; CHECK-LABEL: define void @deref_assumption_in_preheader_non_constant_trip_count_access_i32_align_attribute_too_small(
+ ; CHECK-SAME: ptr noalias noundef align 1 [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
+ ; CHECK-NEXT: [[ENTRY:.*]]:
+ ; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i64 [[N]], 4
+ ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 [[MUL]]) ]
+ ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2
+ ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+ ; CHECK: [[VECTOR_PH]]:
+ ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 2
+ ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+ ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; CHECK: [[VECTOR_BODY]]:
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ]
+ ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+ ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 0
+ ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP1]], align 4
+ ; CHECK-NEXT: [[TMP2:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer
+ ; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
+ ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP3]], i32 0
+ ; CHECK-NEXT: br i1 [[TMP4]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+ ; CHECK: [[PRED_LOAD_IF]]:
+ ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
+ ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP5]]
+ ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
+ ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i32 0
+ ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+ ; CHECK: [[PRED_LOAD_CONTINUE]]:
+ ; CHECK-NEXT: [[TMP9:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP8]], %[[PRED_LOAD_IF]] ]
+ ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1
+ ; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]]
+ ; CHECK: [[PRED_LOAD_IF1]]:
+ ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1
+ ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+ ; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
+ ; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP9]], i32 [[TMP13]], i32 1
+ ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+ ; CHECK: [[PRED_LOAD_CONTINUE2]]:
+ ; CHECK-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP9]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ]
+ ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP15]]
+ ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+ ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 0
+ ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+ ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+ ; CHECK: [[MIDDLE_BLOCK]]:
+ ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+ ; CHECK: [[SCALAR_PH]]:
+ ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+ ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
+ ; CHECK: [[LOOP_HEADER]]:
+ ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+ ; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+ ; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+ ; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4
+ ; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0
+ ; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]]
+ ; CHECK: [[LOOP_THEN]]:
+ ; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4
+ ; CHECK-NEXT: br label %[[LOOP_LATCH]]
+ ; CHECK: [[LOOP_LATCH]]:
+ ; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ]
+ ; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+ ; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1
+ ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+ ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+ ; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]]
+ ; CHECK: [[EXIT]]:
+ ; CHECK-NEXT: ret void
+ ;
+ entry:
+ %mul = mul nsw nuw i64 %n, 4
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 %mul) ]
+ br label %loop.header
+
+ loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 4
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+ loop.then:
+ %l.a = load i32, ptr %gep.a, align 4
+ br label %loop.latch
+
+ loop.latch:
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %exit, label %loop.header
+
+ exit:
+ ret void
+ }