-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathkernel.s
1331 lines (1090 loc) · 21.5 KB
/
kernel.s
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
########################
# CONSTANT DEFINITIONS #
########################
# Field offsets inside a per-executor page (the page sscratch/fp point
# at while an executor runs; see handle_exception / the MCS macros).
.equiv EXEC_PAGE_SCHED_NEXT_OFFSET, 0x100
.equiv EXEC_PAGE_VAS_PADDR_OFFSET, 0x118
.equiv EXEC_PAGE_STATUS_OFFSET, 0x200
.equiv EXEC_PAGE_REFCOUNT_OFFSET, 0x400
# This executor's MCS queue node: next pointer and "lock granted" flag.
.equiv EXEC_PAGE_MCS_NEXT_OFFSET, 0x600
.equiv EXEC_PAGE_MCS_LOCKED_OFFSET, 0x700
.equiv EXEC_TERMINATED, 1
.equiv VAS_REFCOUNT_OFFSET, 0x800
.equiv VAS_REFCOUNT_UNIT, 2
# Field offsets inside the global kernel page (addressed via gp).
.equiv GLOBAL_PAGE_ALLOC_HEAD_OFFSET, 0x000
.equiv GLOBAL_PAGE_ALLOC_TAIL_OFFSET, 0x200
.equiv GLOBAL_PAGE_SCHED_MCS_OFFSET, 0x400
.equiv GLOBAL_PAGE_SCHED_HEAD_OFFSET, 0x600
.equiv GLOBAL_PAGE_SCHED_TAIL_OFFSET, 0x608
.equiv GLOBAL_PAGE_KERNEL_SATP_OFFSET, 0x700
# .equiv HART_PAGE_MCS_NEXT_OFFSET, 0x000
# .equiv HART_PAGE_MCS_LOCKED_OFFSET, 0x200
# Sentinel used by the MCS macros both as the empty-queue marker and
# as the non-zero "lock granted" value.
# MUST NOT be a multiple of PAGE_SIZE
.equiv MCS_EMPTY, 1
# .equiv SANITY_CHECKS, 1
# use only half because risky
.equiv PAGE_COUNT, 16000
.equiv TEXT_PAGE_ALIGNED_LEN, 4096
# Size of the page-allocation bookkeeping area derived from PAGE_COUNT.
# NOTE(review): the free-list code uses 8-byte nodes (512 per 4 KiB
# page), yet the divisor here is 256 -- confirm whether this is
# deliberately oversized or stale.
.equiv PAGE_TABLE_OFFSET, (4096 * ((PAGE_COUNT + 255) / 256))
.equiv PAGE_SIZE, 4096
.equiv SYSCALL_PARK, 2
.equiv SYSCALL_YIELD, 10
#####################
# MACRO DEFINITIONS #
#####################
# Lock the mcs lock pointed to by t0.
# Not reentrant.
#
# The lock word at (t0) is the MCS queue tail; this hart's queue node
# lives in its executor page (base fp) at EXEC_PAGE_MCS_NEXT_OFFSET /
# EXEC_PAGE_MCS_LOCKED_OFFSET.
#
# in:
#  t0 = mcs lock address
#  t1 = MCS_EMPTY
#  fp = this hart's executor page (queue node)
# out:
#  t2 = undef
.macro MCS_LOCK
# TODO: release for zeroing to be visible
# TODO: interrupts?
# set queue end to current hart
amoswap.d t2, fp, (t0)
# if the queue was empty, exit
beq t1, t2, .L\@_end
# set next of prev to current hart
sd fp, EXEC_PAGE_MCS_NEXT_OFFSET(t2)
.L\@_try_lock:
# wait for prev to give lock
# TODO: spin loop hint
# pause
# the unlocker stores MCS_EMPTY (non-zero) into our LOCKED slot;
# spin while it is still zero
ld t2, EXEC_PAGE_MCS_LOCKED_OFFSET(fp)
beqz t2, .L\@_try_lock
.L\@_end:
# acquire fence
# TODO: can this be relaxed to r,r
fence r,rw
.endm # MCS_LOCK
# Unlock the mcs lock pointed to by t0
#
# Counterpart of MCS_LOCK: either clears the queue tail (we were the
# only waiter) or hands the lock to the next queued hart.
#
# in:
#  t0 = mcs lock address
#  t1 = MCS_EMPTY
#  fp = this hart's executor page (queue node)
# out:
#  t2 = undef
.macro MCS_UNLOCK
.L\@_cas:
# try to clear queue if cur is last
lr.d t2, (t0)
# cur != last => queue not empty
bne t2, fp, .L\@_ld_next
sc.d.rl t2, t1, (t0)
# cas failed, try again
bnez t2, .L\@_cas
# TODO: fence here?
# success, exit
j .L\@_end
.L\@_ld_next:
# load next in queue, loop while
# waiting for next hart to update next ptr
# (our next slot holds MCS_EMPTY until the successor links itself)
ld t2, EXEC_PAGE_MCS_NEXT_OFFSET(fp)
beq t2, t1, .L\@_ld_next
# release store, unlock lock for next
fence rw, w
sd t1, EXEC_PAGE_MCS_LOCKED_OFFSET(t2)
# reset hart lock memory vars
sd zero, EXEC_PAGE_MCS_LOCKED_OFFSET(fp)
sd t1, EXEC_PAGE_MCS_NEXT_OFFSET(fp)
.L\@_end:
.endm # MCS_UNLOCK
# Fill the PAGE_SIZE-byte page at t0 with zeroes.
# The pointer t0 must be aligned to PAGE_SIZE.
#
# Writes eight doublewords per iteration and loops until t0 reaches
# the end of the page, then rewinds t0 back to the page start.
#
# in:
#  t0 = page to zero
# out:
#  t0 unchanged
#  t1 clobbered
.macro ZERO_PAGE
# t1 = one past the end of the page
li t1, PAGE_SIZE
add t1, t1, t0
.L\@_fill:
sd zero, 0x00(t0)
sd zero, 0x08(t0)
sd zero, 0x10(t0)
sd zero, 0x18(t0)
sd zero, 0x20(t0)
sd zero, 0x28(t0)
sd zero, 0x30(t0)
sd zero, 0x38(t0)
addi t0, t0, 0x40
bne t0, t1, .L\@_fill
# rewind t0 to the start of the page in two steps
# (PAGE_SIZE exceeds the 12-bit addi immediate range)
addi t0, t0, -PAGE_SIZE / 2
addi t0, t0, -PAGE_SIZE / 2
.endm # ZERO_PAGE
# Free the page pointed to by t0
#
# Appends the page's link-table node to the global free-page list:
# swap ourselves in as the new tail, then link from the old tail.
#
# in:
#  t0 = paddr of page to be freed
#  t1 = start of ram segment
# out:
#  t0, t1 clobbered
.macro FREE_PAGE
# TODO: is it safe to have the allocation struct fixed
# convert raw address to ptr to node in link table
# node = ram_start + (paddr - ram_start) / 512 (8-byte node per 4 KiB page)
sub t0, t0, t1
srli t0, t0, 9
add t0, t0, t1
# set next to null and set tail to cur
sd zero, (t0)
amoswap.d.rl t1, t0, GLOBAL_PAGE_ALLOC_TAIL_OFFSET(gp)
sd t0, (t1)
.endm # FREE_PAGE
# decrement the refcount of the page pointed to by t0
# and append it to the free-page list when appropriate
#
# in:
#  t0 = page to be freed
#  t1 = start of ram segment
# out:
#  t0, t1 clobbered
.macro DEC_PAGE_REFCOUNT
# convert raw address to ptr to node in link table
sub t0, t0, t1
srli t0, t0, 9
add t0, t0, t1
# atomically decrement; t1 receives the refcount BEFORE the decrement
li t1, -1
amoadd.d.rl t1, t1, (t0)
# NOTE(review): this skips the free when the old count was zero and
# frees whenever it was non-zero. With a plain reference count that
# looks inverted (free should happen when the count drops to zero) --
# confirm the refcount encoding (stored as count-1?) before reuse.
beqz t1, .L\@_end
fence r, rw
# set next to null and set tail to cur
sd zero, (t0)
amoswap.d.rl t1, t0, GLOBAL_PAGE_ALLOC_TAIL_OFFSET(gp)
sd t0, (t1)
.L\@_end:
.endm # DEC_PAGE_REFCOUNT
# Pop up to a3 + 1 nodes from the global free-page list.
# The caller donates a spare node (a2) which is swapped in as the new
# list tail; popped node pointers are written to the array at a4.
.macro ALLOC_PAGE
# i guess use this when filling page tables
# TODO; restore reserve first???
# a2 = reserve
# a3 = n
# a4 = out
.L\@_pop:
# a6 counts nodes popped so far
li a6, 0
# terminate the reserve node and make it the new tail;
# t0 = previous tail
sd x0, (a2)
amoswap.d.aqrl t0, a2, GLOBAL_PAGE_ALLOC_TAIL_OFFSET(gp)
.L\@_fst_again:
ld t1, GLOBAL_PAGE_ALLOC_HEAD_OFFSET(gp)
.L\@_snd_again:
# t2 = next of current head; zero means we caught up with the tail
ld t2, (t1)
beqz t2, .L\@_snd_zero
.L\@_cas:
# CAS the head from t1 to t2 (pop t1)
lr.d t3, GLOBAL_PAGE_ALLOC_HEAD_OFFSET(gp)
bne t3, t1, .L\@_cas_ne
sc.d t4, t2, GLOBAL_PAGE_ALLOC_HEAD_OFFSET(gp)
bnez t4, .L\@_cas
beq a6, a3, .L\@_restore_reserve
addi a6, a6, 1
sd t1, (a4)
addi a4, a4, 8
mv t1, t2
j .L\@_snd_again
.L\@_cas_ne:
# head moved under us; retry with the value lr.d observed
mv t1, t3
j .L\@_snd_again
.L\@_snd_zero:
bne t0, t1, .L\@_fst_again
.L\@_restore_reserve:
# relink and keep the last popped node as the caller's new reserve
sd a2, (t0)
mv a2, t1
# TODO: wtf is this ret
# NOTE(review): a bare `ret` inside a macro returns from whatever
# function the macro was expanded into -- confirm this is intended.
ret # a6 contains m
.endm # ALLOC_PAGE
# a2 = reserve
# a3 = n
# a4 = out
# Pop a single node from a free list.
# NOTE(review): this body looks machine-generated (BB labels) and uses
# a different register convention from ALLOC_PAGE -- apparently
# a0 = ptr to the reserve slot and a1 = ptr to the head (tail at
# a1 + 8). Confirm before reuse; it also contains bare `ret`s inside
# a macro (returns from the enclosing function) and clobbers
# a0-a7 plus the input registers.
.macro ALLOC_SINGLE_PAGE
# TODO; restore reserve first???
ld a6, 0(a0)
sd zero, 0(a6)
addi a2, a1, 8
amoswap.d.aqrl a7, a6, (a2)
.L\@_BB4_1:
ld a5, 0(a1)
beq a5, a7, .L\@_BB4_10
ld a2, 0(a5)
beqz a2, .L\@_BB4_1
.L\@_BB4_11:
lr.d a3, (a1)
bne a3, a5, .L\@_BB4_1
sc.d a4, a2, (a1)
bnez a4, .L\@_BB4_11
sd zero, 0(a5)
sd a5, 0(a0)
.L\@_BB4_5:
ld a2, 0(a1)
beq a2, a7, .L\@_BB4_9
ld a0, 0(a2)
beqz a0, .L\@_BB4_5
.L\@_BB4_14:
lr.d a3, (a1)
bne a3, a2, .L\@_BB4_5
sc.d a4, a0, (a1)
bnez a4, .L\@_BB4_14
mv a1, a7
.L\@_BB4_9:
sd a6, 0(a1)
mv a0, a2
ret
.L\@_BB4_10:
li a2, 0
sd a5, 0(a0)
sd a6, 0(a1)
mv a0, a2
ret
/*
.L\@_pop:
li a6, 0
sd x0, (a2)
amoswap.d.aqrl t0, a2, GLOBAL_PAGE_ALLOC_TAIL_OFFSET(gp)
.L\@_fst_again:
ld t1, GLOBAL_PAGE_ALLOC_HEAD_OFFSET(gp)
.L\@_snd_again:
ld t2, (t1)
beqz t2, .L\@_snd_zero
.L\@_cas:
lr.d t3, GLOBAL_PAGE_ALLOC_HEAD_OFFSET(gp)
bne t3, t1, .L\@_cas_ne
sc.d t4, t2, GLOBAL_PAGE_ALLOC_HEAD_OFFSET(gp)
bnez t4, .L\@_cas
beq a6, a3, .L\@_restore_reserve
addi a6, a6, 1
sd t1, (a4)
addi a4, a4, 8
mv t1, t2
j .L\@_snd_again
.L\@_cas_ne:
mv t1, t3
j .L\@_snd_again
.L\@_snd_zero:
bne t0, t1, .L\@_fst_again
.L\@_restore_reserve:
sd a2, (t0)
mv a2, t1
# a6 contains m
*/
.endm # ALLOC_SINGLE_PAGE
# Clear one PTE and, if it was valid, push the page it mapped onto the
# caller's list of freed pages (a chain of link-table nodes).
#
# in:
#  t0 = pointer to PTE
#  t1 = start of ram segment
#  t2 = head of freed pages
# out:
#  t2 = new head of freed pages
#  t0, t3, a0 clobbered
.macro UNMAP_PTE
# store PTE PPN mask (bits 53:10) in t3
lui t3, 1048320
srli t3, t3, 10
ld a0, (t0)
sd zero, (t0)
# nothing to free if the V bit was clear
andi t0, a0, 1
beqz t0, .L\@_end
# paddr = PPN << 12 = (pte & PPN_MASK) << 2
# (bug fix: the shift must use the masked PTE in a0, not the V-bit
# test result that was left in t0)
and a0, a0, t3
slli t0, a0, 2
# convert raw address to ptr to node in link table
sub t0, t0, t1
srli t0, t0, 9
add t0, t0, t1
# push the node onto the freed-pages chain
sd t2, (t0)
mv t2, t0
.L\@_end:
# .ifdef SANITY_CHECKS
# # check D,A,G,U bits
# andi x2, x3, 0b11110000
# bnez x2, kernel_panic
# # check bits above PPN
# srli x2, x3, 53
# bnez x2, kernel_panic
# .endif
.endm # UNMAP_PTE
# Switch this hart to the kernel address space whose satp value was
# recorded in the global page (see foo_test). Clobbers t0.
# NOTE(review): no sfence.vma after the satp write -- confirm callers
# fence (or rely on ASID semantics) before using the new mapping.
.macro TO_KERNEL_SATP
ld t0, GLOBAL_PAGE_KERNEL_SATP_OFFSET(gp)
csrw satp, t0
.endm # TO_KERNEL_SATP
# Read a big-endian 32-bit value, byte by byte, from the address in x1.
#
# in:
#  x1 = source address (may be unaligned; not modified)
# out:
#  a1 = zero-extended value
#  x2 clobbered (holds the last byte read)
.macro READ_BE_32
lbu a1, (x1)
lbu x2, 1(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 2(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 3(x1)
slli a1, a1, 8
add a1, a1, x2
.endm
# Read a big-endian 64-bit value, byte by byte, from the address in x1.
#
# in:
#  x1 = source address (may be unaligned; not modified)
# out:
#  a1 = value
#  x2 clobbered (holds the last byte read)
.macro READ_BE_64
lbu a1, (x1)
lbu x2, 1(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 2(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 3(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 4(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 5(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 6(x1)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 7(x1)
slli a1, a1, 8
add a1, a1, x2
.endm
# Print the top x2 bits of a1 in binary (MSB first) followed by a
# newline, one character per ecall (a7 = 1, the legacy SBI
# console-putchar call).
# a1 = number to print, starting from high bit
# x2 = num bits
# clobbers a0, a1, a7, x2
.macro PRINT_BIN
1:
srli a0, a1, 63
addi a0, a0, '0'
li a7, 1
ecall
slli a1, a1, 1
addi x2, x2, -1
bnez x2, 1b
# trailing newline (a7 is still 1 from the loop)
li a0, 10
ecall
.endm
# Print the low byte of a1 in binary. Clobbers a0, a1, a7, x2.
.macro PRINT_BIN_8
# move the byte into bits 63:56 so PRINT_BIN shifts it out first
slli a1, a1, 56
li x2, 8
PRINT_BIN
.endm
# Print the low 32 bits of a1 in binary. Clobbers a0, a1, a7, x2.
.macro PRINT_BIN_32
# move the word into bits 63:32 so PRINT_BIN shifts it out first
slli a1, a1, 32
li x2, 32
PRINT_BIN
.endm
# Print all 64 bits of a1 in binary. Clobbers a0, a1, a7, x2.
.macro PRINT_BIN_64
li x2, 64
PRINT_BIN
.endm
# Print the NUL-terminated string at x1 one character per ecall.
# NOTE(review): a7 is never set here -- relies on a7 already holding 1
# (SBI console putchar) from earlier code; confirm callers guarantee it.
# clobbers a0; x1 ends pointing at the terminating NUL
.macro PRINT_ASCII
1:
lbu a0, (x1)
beqz a0, 1f
ecall
addi x1, x1, 1
j 1b
1:
.endm
# Boot entry: builds the initial Sv39 address space (identity map of
# the lower half plus a highmem alias for the kernel text and a global
# kernel page), enables paging, and jumps to foo_test via the alias.
.global _start
_start:
# print hartid?
# mv a1, a0
# PRINT_BIN_64
.equiv DRAM_START, 0x80200000
# TODO: overkill, 64kb
.equiv KERNEL_PAGE_COUNT, 16
.equiv MEM_PAGE_COUNT, 32768 - 512
.equiv PAGE_TABLE_PAGE_COUNT, 63 # = (32768 - 512) / 512
.equiv PREALLOCATED_PAGE_COUNT, KERNEL_PAGE_COUNT + PAGE_TABLE_PAGE_COUNT
#.equiv KERNEL_BASE_VADDR, 0xfffffffffff00000
.equiv KERNEL_BASE_VADDR, 0xffffffcaaaa80000
# lla x17, handle_exception
# csrw stvec, x17
# csrr x16, stvec
# bne x16, x17, kernel_panic
# mask and clear all supervisor interrupts during bring-up
csrw sie, zero
csrw sip, zero
# the code below assumes the image was loaded exactly at DRAM_START
lla t0, _start
li t1, DRAM_START
bne t0, t1, kernel_panic
# t0 = first byte after the kernel text = the page-allocation link table
li t1, KERNEL_PAGE_COUNT * PAGE_SIZE
add t0, t0, t1
# TODO: check ASID count
# zero out preallocated (kernel text + page table) pages
# (zeroes the first PREALLOCATED_PAGE_COUNT 8-byte link-table nodes)
li t1, PREALLOCATED_PAGE_COUNT
1:
sd zero, (t0)
addi t0, t0, 8
addi t1, t1, -1
bnez t1, 1b
# set up rest of the pages for allocation:
# each remaining node stores a pointer to the next, forming a free list
li t1, MEM_PAGE_COUNT - PREALLOCATED_PAGE_COUNT - 1
# li t0, DRAM_START + PREALLOCATED_PAGE_COUNT * PAGE_SIZE
addi t0, t0, 8
1:
sd t0, -8(t0)
addi t0, t0, 8
addi t1, t1, -1
bnez t1, 1b
# terminate the free list
sd zero, -8(t0)
# get reserve page for page allocation
# t1 = link-table node of the first free page, t2 = link-table start
li t1, DRAM_START + PREALLOCATED_PAGE_COUNT * PAGE_SIZE + PREALLOCATED_PAGE_COUNT * 8
li t2, DRAM_START + PREALLOCATED_PAGE_COUNT * PAGE_SIZE
# allocate a page
# paddr = DRAM_START + (node - table_start) * 512
mv t0, t1
sub t0, t0, t2
slli t0, t0, 9
li t3, DRAM_START
add t0, t0, t3
sd zero, (t1)
addi t1, t1, 8
# s8 = allocation cursor in the link table
addi s8, t1, -8
# s9 = paddr of the most recently allocated page
mv s9, t0
# t0 now has a fresh page
# TODO: make these not executable later
# PTE: DAGUXWRV
li t3, 0b11001111
li t4, 1
slli t4, t4, 28
li t5, 256 # half of ptes
# identity map lower half
# fill the first 256 root PTEs with 1 GiB identity megapages (RWX)
1:
sd t3, (t0)
addi t0, t0, 8
add t3, t3, t4
addi t5, t5, -1
bnez t5, 1b
# s0 = root table address
addi s0, t0, -2048
mv s4, s0
li s1, KERNEL_BASE_VADDR
# TODO: this assumes sv39
# VPN[2]
srli t3, s1, 30
andi t3, t3, 0x1ff
slli t3, t3, 3
# make t3 a pointer to the first pte
add t3, t3, s0
# t2 = allocated page (the level-1 table for the kernel alias)
li s10, PAGE_SIZE
add t2, s9, s10
add s9, s9, s10
addi s8, s8, 8
mv t0, t2
ZERO_PAGE
# root PTE -> level-1 table (non-leaf: only G and V set)
srli t4, t2, 2
ori t4, t4, (1<<5) + 1 # G and V bits
sd t4, (t3)
# VPN[1]
srli t3, s1, 21
andi t3, t3, 0x1ff
slli t3, t3, 3
# make t2 a pointer to the second pte
add t2, t2, t3
# t4 = allocated page (the level-0 table)
add t4, s9, s10
add s9, s9, s10
addi s8, s8, 8
mv t0, t4
ZERO_PAGE
# level-1 PTE -> level-0 table
srli t5, t4, 2
ori t5, t5, (1<<5) + 1 # G and V bits
sd t5, (t2)
# VPN[0]
srli t3, s1, 12
andi t3, t3, 0x1ff
slli t3, t3, 3
# make t3 a pointer to the third pte
add t3, t3, t4
# zero out top 8 bits
li s1, DRAM_START
srli s1, s1, 2
# PTE: DAGUXWRV
# map the kernel text pages (execute-only) at KERNEL_BASE_VADDR
ori s1, s1, 0b11101001
li t2, KERNEL_PAGE_COUNT # TODO: how many pages are needed
1:
sd s1, (t3)
# next page: PPN field advances by 1 (bit 10 of the PTE)
addi s1, s1, 1024
addi t3, t3, 8
addi t2, t2, -1
bnez t2, 1b
# t4 = allocated page (the global kernel page, mapped after the text)
add t4, s9, s10
add s9, s9, s10
addi s8, s8, 8
# mv t0, t4
# ZERO_PAGE
srli t4, t4, 2
# PTE: DAGUXWRV
ori s1, t4, 0b11100111 # make the global page RW
sd s1, (t3)
# s0 = to be new satp
srli s0, s0, 12
# MODE = 8 (Sv39): bit 63
li t1, 1
slli t1, t1, 63
add s0, s0, t1
# set all asid bits
li t6, 0xffff
slli t6, t6, 44
add s0, s0, t6
# mv a1, s0
# PRINT_BIN_64
csrw satp, s0
csrr a1, satp
# PRINT_BIN_64
li t1, KERNEL_BASE_VADDR
srli t3, t1, 30
andi t3, t3, 0x1ff
slli t3, t3, 3
add a1, t3, s4
# PRINT_BIN_64
# TODO: after all allocations, fixup allocation table
# terminate the allocation cursor
sd zero, (s8)
addi s8, s8, 8
# s8 is now the start of the allocation table
# compute foo_test's highmem alias and jump to it
lla a5, foo_test
li a3, KERNEL_BASE_VADDR
add a3, a3, a5
li a4, DRAM_START
sub a3, a3, a4
jr a3
# First code executed through the highmem alias: initializes the
# global kernel page (gp) and records the kernel satp, then shuts down
# (everything after the `j shutdown` is unreachable WIP code).
foo_test:
# gp = one page past the kernel text = the global kernel page
li gp, KERNEL_BASE_VADDR + KERNEL_PAGE_COUNT * PAGE_SIZE
mv a1, gp
PRINT_BIN_64
# scribble a recognizable pattern over the whole page (512 dwords)
li t4, 0xaaaaaaaaaaaaaaaa
li s1, 512
1:
sd t4, (gp)
addi gp, gp, 8
addi s1, s1, -1
bnez s1, 1b
# rewind gp to the page start (addi immediate is only 12-bit)
addi gp, gp, -PAGE_SIZE / 2
addi gp, gp, -PAGE_SIZE / 2
mv a1, gp
PRINT_BIN_64
# record the kernel address space for TO_KERNEL_SATP
csrr t0, satp
sd t0, GLOBAL_PAGE_KERNEL_SATP_OFFSET(gp)
# NOTE(review): the MCS macros use MCS_EMPTY (1) as the empty-queue
# marker; initializing the scheduler lock word to 0 looks
# inconsistent with that -- confirm.
sd zero, GLOBAL_PAGE_SCHED_MCS_OFFSET(gp)
# TODO: set up rest of gp
auipc a1, 0
PRINT_BIN_64
j shutdown
# set up kernel mapping:
# identity on lowmem (RWX for now)
# global kernel page (RW) in highmem
# kernel text page(s) (X) in highmem
csrw sip, x0
li a0, -1
csrw sie, a0
li a1, 2
csrs sstatus, a1
# SBI TIME extension (EID 0x54494D45): set a timer 20000000 ticks out
li a7, 0x54494D45
li a6, 0
li a1, 0
li a0, 20000000
ecall
# csrr a1, stvec
# PRINT_BIN_64
# auipc a1, 0
# PRINT_BIN_64
# csrw sip, x0
csrr a1, sstatus
PRINT_BIN_64
# csrw sip, x0
# csrr a1, sip
# PRINT_BIN_64
j sleep
# Alternative entry point; currently just powers off.
boot_start:
j shutdown
# page-aligned start of the kernel memory area
.balign PAGE_SIZE
kernel_mem_area_start:
###############
# KERNEL CODE #
###############
# WIP experiment (unlabeled; follows kernel_mem_area_start): writes
# nine 1 GiB identity megapage PTEs at a hard-coded table address,
# switches satp to it, then reads a big-endian u32 from a1 + 72
# (presumably a boot/device-tree blob field -- TODO confirm) and
# falls through to print_bin.
mv x27, a1
# x10 = hard-coded page-table paddr
li x10, 0b0000000000000000000000000000000010000000001000000010000000000000
# x11 = first PTE (XWRV); x13 = PPN increment of one 1 GiB megapage
li x11, 0b00001111
li x13, 1
slli x13, x13, 28
# should index into first PT
sd x11, (x10)
add x11, x11, x13
sd x11, 8(x10)
add x11, x11, x13
sd x11, 16(x10)
add x11, x11, x13
sd x11, 24(x10)
add x11, x11, x13
sd x11, 32(x10)
add x11, x11, x13
sd x11, 40(x10)
add x11, x11, x13
sd x11, 48(x10)
add x11, x11, x13
sd x11, 56(x10)
add x11, x11, x13
sd x11, 64(x10)
# x10 = satp value: MODE 8 (Sv39) | root PPN
srli x10, x10, 12
li x2, 8
slli x2, x2, 60
add x10, x10, x2
li x3, 0b1111111111111111
slli x3, x3, 44
# add x10, x10, x3
sfence.vma
fence.i
csrw satp, x10
sfence.vma
fence.i
# inline READ_BE_32 of the word at x27 + 72 into a1
addi x27, x27, 72
lbu a1, (x27)
lbu x2, 1(x27)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 2(x27)
slli a1, a1, 8
add a1, a1, x2
lbu x2, 3(x27)
slli a1, a1, 8
add a1, a1, x2
# Print all 64 bits of a1 in binary via SBI console putchar (a7 = 1),
# then fall through to sleep. Subroutine twin of the PRINT_BIN macro
# (no trailing newline here). Clobbers a0, a1, a3, a7.
print_bin:
li a3, 64
print_loop:
srli a0, a1, 63
addi a0, a0, '0'
li a7, 1
ecall
slli a1, a1, 1
addi a3, a3, -1
bnez a3, print_loop
j sleep
# Restore a user context from the executor page in fp and sret.
# Slot layout: 0x00 = saved pc (moved to sepc, then overwritten with
# the current tp), 0x08..0xf8 = x1..x31 (x8/fp itself is restored
# last, from 0x40). sscratch is pointed back at the executor page so
# handle_exception can find it on the next trap.
return_to_user:
csrw sscratch, fp
ld t1, 0x00(fp)
csrw sepc, t1
# stash the current tp in the (now consumed) pc slot
# NOTE(review): confirm this tp handoff is intended -- x4/tp is also
# reloaded from slot 0x20 just below.
sd tp, 0x00(fp)
ld x1, 0x08(fp)
ld x2, 0x10(fp)
ld x3, 0x18(fp)
ld x4, 0x20(fp)
ld x5, 0x28(fp)
ld x6, 0x30(fp)
ld x7, 0x38(fp)
# no x8/fp
ld x9, 0x48(fp)
ld x10, 0x50(fp)
ld x11, 0x58(fp)
ld x12, 0x60(fp)
ld x13, 0x68(fp)
ld x14, 0x70(fp)
ld x15, 0x78(fp)
ld x16, 0x80(fp)
ld x17, 0x88(fp)
ld x18, 0x90(fp)
ld x19, 0x98(fp)
ld x20, 0xa0(fp)
ld x21, 0xa8(fp)
ld x22, 0xb0(fp)
ld x23, 0xb8(fp)
ld x24, 0xc0(fp)
ld x25, 0xc8(fp)
ld x26, 0xd0(fp)
ld x27, 0xd8(fp)
ld x28, 0xe0(fp)
ld x29, 0xe8(fp)
ld x30, 0xf0(fp)
ld x31, 0xf8(fp)
# fp restored last since it is the base pointer for all loads above
ld fp, 0x40(fp)
# heavyweight synchronization before entering user mode
fence.i
sfence.vma x0, x0
fence iorw, iorw
sret
# Enable all supervisor interrupt sources and park this hart in a
# wfi loop forever.
sleep:
li a0, -1
csrw sie, a0
sleep_loop:
wfi
j sleep_loop
# Power off via the SBI system-reset extension
# (EID 0x53525354 "SRST", FID 0, a0 = shutdown, a1 = no reason).
shutdown:
li a7, 0x53525354
li a6, 0
li a0, 0
li a1, 0
ecall
# check if x1 belongs to address space x3
# Sv39 three-level page walk: falls through to found_leaf with the
# leaf PTE in x3 on success, branches to fail when the address is out
# of range or unmapped.
#
# in:
#  x1 = user virtual address
#  x3 = paddr of the root page table
# out:
#  x3 = leaf PTE
#  x2, x4 clobbered
#
# (bug fixes vs. the previous revision: the VPN shifts were all off by
# one (29/20/11 instead of 30/21/12 -- compare the page-table setup in
# _start), the user-range check shifted by 37 instead of 38, and the
# non-leaf PTE-to-table conversion was missing the << 2 used by
# UNMAP_PTE.)
# store PTE PPN mask (bits 53:10) in x4
lui x4, 1048320
srli x4, x4, 10
# check that x1 is in user memory
# (user half of Sv39: bits 63:38 must be clear)
srli x2, x1, 38
bnez x2, fail
# x2 = VPN[2] (vaddr bits 38:30; <= 0xff after the range check, so no
# mask is needed)
srli x2, x1, 30
# x3 = (top PT)[VPN[2]]
slli x2, x2, 3
add x3, x3, x2
# x3 = top level PTE
ld x3, (x3)
# check valid bit
andi x2, x3, 1
beqz x2, fail
# check if leaf (any of X/W/R set)
andi x2, x3, 0b1110
bnez x2, found_leaf
.ifdef SANITY_CHECKS
# check D,A,G,U bits
andi x2, x3, 0b11110000
bnez x2, kernel_panic
# check bits above PPN
srli x2, x3, 53
bnez x2, kernel_panic
.endif
# extract new PT address from PTE: paddr = (pte & PPN_MASK) << 2
and x3, x3, x4
slli x3, x3, 2
# x2 = VPN[1] (vaddr bits 29:21)
srli x2, x1, 21
andi x2, x2, 0b111111111
slli x2, x2, 3
add x3, x3, x2
ld x3, (x3)
# check valid bit
andi x2, x3, 1
beqz x2, fail
# check if leaf (any of X/W/R set)
andi x2, x3, 0b1110
bnez x2, found_leaf
.ifdef SANITY_CHECKS
# check D,A,G,U bits
andi x2, x3, 0b11110000
bnez x2, kernel_panic
# check bits above PPN
srli x2, x3, 53
bnez x2, kernel_panic
.endif
# extract new PT address from PTE: paddr = (pte & PPN_MASK) << 2
and x3, x3, x4
slli x3, x3, 2
# x2 = VPN[0] (vaddr bits 20:12)
srli x2, x1, 12
andi x2, x2, 0b111111111
slli x2, x2, 3
add x3, x3, x2
ld x3, (x3)
# check valid bit
andi x2, x3, 1
beqz x2, fail
.ifdef SANITY_CHECKS
# check if leaf
andi x2, x3, 0b1110
beqz x2, kernel_panic
.endif
found_leaf:
.ifdef SANITY_CHECKS
# check G,U bits
andi x2, x3, 0b110000
# if G,U != 0,1: panic
xori x2, x2, 0b010000
bnez x2, kernel_panic
# check W,R bits
andi x2, x3, 0b110
# if W,R == 1,0: panic
xori x2, x2, 0b100
beqz x2, kernel_panic
# check bits above PPN
srli x2, x3, 53
bnez x2, kernel_panic
.endif
fail:
# Print "PANIC!\n" one character at a time on the SBI console
# (a7 = 1, legacy console putchar) and power off.
kernel_panic:
li a7, 1
li a0, 'P'
ecall
li a0, 'A'
ecall
li a0, 'N'
ecall
li a0, 'I'
ecall
li a0, 'C'
ecall
li a0, '!'
ecall
# trailing newline
li a0, 10
ecall
j shutdown
handle_exception:
# TODO: what fences
fence iorw, iorw
sfence.vma x0, x0
fence.i
# sscretch is pointer to executor page
csrrw x1, sscratch, x1
sd x2, 0x10(x1)
sd x3, 0x18(x1)
sd x4, 0x20(x1)
sd x5, 0x28(x1)
sd x6, 0x30(x1)
sd x7, 0x38(x1)
sd x8, 0x40(x1)
sd x9, 0x48(x1)
sd x10, 0x50(x1)
sd x11, 0x58(x1)
sd x12, 0x60(x1)
sd x13, 0x68(x1)
sd x14, 0x70(x1)
sd x15, 0x78(x1)
sd x16, 0x80(x1)
sd x17, 0x88(x1)
sd x18, 0x90(x1)
sd x19, 0x98(x1)
sd x20, 0xa0(x1)
sd x21, 0xa8(x1)
sd x22, 0xb0(x1)
sd x23, 0xb8(x1)
sd x24, 0xc0(x1)
sd x25, 0xc8(x1)
sd x26, 0xd0(x1)
sd x27, 0xd8(x1)
sd x28, 0xe0(x1)
sd x29, 0xe8(x1)
sd x30, 0xf0(x1)