@@ -1663,14 +1663,23 @@ ctr_crypt_s:
pop {r1}
ldmia r1, {r8-r11} @ r8-r11 = IVshareB
clear03 32
- bl gen_rand_sha_nonpres ; eors r4,r4,r0; mov r8, r8, ror#16; eor r8, r8, r0,ror#16
- bl gen_rand_sha_nonpres ; eors r5,r5,r0; mov r9, r9, ror#16; eor r9, r9, r0,ror#16
- bl gen_rand_sha_nonpres ; eors r6,r6,r0; mov r10,r10,ror#16; eor r10,r10,r0,ror#16
- bl gen_rand_sha_nonpres ; eors r7,r7,r0; mov r11,r11,ror#16; eor r11,r11,r0,ror#16
+ bl gen_rand_sha_nonpres ; eors r4,r4,r0; movs r1,#0; mov r8, r8, ror#16; eor r8, r8, r0,ror#16 @ Barriers between shares to prevent implicit r4^r8 etc
+ bl gen_rand_sha_nonpres ; eors r5,r5,r0; movs r1,#0; mov r9, r9, ror#16; eor r9, r9, r0,ror#16
+ bl gen_rand_sha_nonpres ; eors r6,r6,r0; movs r1,#0; mov r10,r10,ror#16; eor r10,r10,r0,ror#16
+ bl gen_rand_sha_nonpres ; eors r7,r7,r0; movs r1,#0; mov r11,r11,ror#16; eor r11,r11,r0,ror#16
ldr r0, =IV0
stmia r0, {r4-r7}
adds r0, r0, #20
stmia r0, {r8-r11}
+ @ "Decommission" IV0 so th at it doesn't get stacked
+ bl gen_rand_sha_nonpres ; movs r4,r0
+ bl gen_rand_sha_nonpres ; movs r5,r0
+ bl gen_rand_sha_nonpres ; movs r6,r0
+ bl gen_rand_sha_nonpres ; movs r7,r0
+ bl gen_rand_sha_nonpres ; mov r8,r0
+ bl gen_rand_sha_nonpres ; mov r9,r0
+ bl gen_rand_sha_nonpres ; mov r10,r0
+ bl gen_rand_sha_nonpres ; mov r11,r0
pop {r1, r2}
@ r1=cipher/plaintext buffer, r2=number of blocks
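For orientation, here is a minimal C sketch (not part of the change, and only a model under my reading of the diff) of the share-refresh step the first four new lines perform: the IV is held as two XOR shares, and XORing the same fresh random word into both shares re-randomizes the masking without changing the value they jointly encode. The rotate-by-16 of share B and the inserted movs r1,#0 barrier are hardware-level hardening so the two shares of one word are never handled bit-aligned or back to back in the datapath; they have no C analogue. Names such as refresh_word and rand32 are illustrative stand-ins, not the ROM's API (the real randomness source is gen_rand_sha_nonpres).

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Toy stand-in for gen_rand_sha_nonpres; NOT cryptographic. */
static uint32_t rand32(void) {
    static uint32_t s = 0x12345678u;
    s ^= s << 13; s ^= s >> 17; s ^= s << 5;
    return s;
}

/* One word kept as two XOR shares: the real value is a ^ b at all times. */
typedef struct { uint32_t a, b; } xor_shares_t;

/* Refresh the masking with a fresh random r.  Because r is XORed into both
   shares, a ^ b is unchanged, while each individual share is re-randomized. */
static void refresh_word(xor_shares_t *w) {
    uint32_t r = rand32();
    w->a ^= r;   /* eors r4,r4,r0 in the diff                           */
    w->b ^= r;   /* eor r8,r8,r0,ror#16 (share B kept in a rotated form) */
}

int main(void) {
    xor_shares_t w = { .a = 0xDEADBEEFu ^ 0x01234567u, .b = 0x01234567u };
    uint32_t before = w.a ^ w.b;
    refresh_word(&w);
    /* The two prints are identical: refreshing the mask preserves the value. */
    printf("value before %08" PRIx32 ", value after %08" PRIx32 "\n",
           before, w.a ^ w.b);
    return 0;
}

The later "decommission" block uses the same random source for a different purpose: every register that held IV share material is overwritten with a fresh random word, so that subsequent pushes of r4-r11 cannot leave IV share data on the stack.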