#include "metac/backend/entry.h"
#include "metac/backend/iterator.h"
#include "metac/backend/value.h"
#include "metac/reflect.h"
#include <assert.h>
#include <errno.h>
#include <stdio.h>  /* printf */
#include <stdlib.h> /* qsort */
#include <string.h> /* memcpy */
/** \page deep_function_implementation Deep Functions implementation
Here are the initial thoughts on the 'deep' functions: compare, delete and copy.
Compare looks the simplest, so let's start with it.
The easiest approach would be to convert both values to strings and compare the strings.
There are several caveats:
the string conversion returns an error when it meets a loop (though we could actually return the loop number instead).
the string conversion also doesn't support the containerof scenario - it returns NULL there as well, because that isn't compatible with the C-initializer format (needs experimenting).
the last thing - void* pointers can be compared only by value; that is all we can do if the typecast handler returns NULL.
This is compatible with the string approach, but it is not ideal - we didn't want to compare pointers.
The idea is to compare data and structure.
We can probably add a mode that fails if we have a pointer which wasn't cast from void, or whose type is only declared and we have no data about it
(in that case we treat it as void* as well).
So we could still use the 'string' approach if we add all those things as string 'modes'; the string code could potentially benefit from that in the future.
The last caveat is that the string approach requires more memory, so we're going to create a function that compares in place.
During the comparison we won't analyze the structure of the data: e.g.
A
/ \
/ \
B C
\ /
\ /
D
Pic 1.
is going to be equal to
A
/ \
/ \
B C
| |
| |
D1 D2
Pic 2.
in case D1 is equal to D2.
This may be a subject for later research: e.g. metac_value_free shouldn't delete D twice. That means
that in order to implement metac_value_free we'll need to track the allocated memory blocks. One more question is
whether we can allow 'weak pointers' - pointers that point to a field within a block of memory. We must not try to delete those memory blocks either.
We may:
1. add a flag in the handler that lets walkers ignore such pointers (this is ok: we typically know in advance whether a pointer is weak).
2. use the containerof scenario to detect the beginning of the allocated memory (this is not always possible, or at least there are cases where it will be difficult).
3. track memory blocks (for metac_value_copy we need to know all memory block sizes anyway), so we may try to detect weak pointers.
The same applies to the copy function, but copy can tolerate weak pointers because it doesn't modify the original struct:
when copying it can create a strong pointer instead of a weak one and copy that part of the memory block.
This behavior of 'changing' the structure and producing Pic 2 out of a Pic 1 structure isn't good, but it may be acceptable as a known behavior flaw.
In general we must decide what we're going to allow:
1. only tree structures (Pic 2)? In that case the simple cycle check implemented in value_string is enough.
2. DAGs (Pic 1) - we'll need a hash table of pointers to identify shared blocks.
3. DAGs with weak pointers pointing into the middle of allocated memory (the most difficult case) - some extra mechanism on top of the hash table.
We can deal with weak pointers by collecting all such values:
sort them by address and, if we know the sizes, find which pointers point inside other blocks and which don't (sweep line algorithm, O(n*log(n))).
The pointers which point into the inner parts of blocks are weak (see the sketch below).
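A minimal sketch of that sweep, using a hypothetical block_t record per pointed-to block (not part of the actual API):
\code
typedef struct { char *addr; size_t size; int weak; } block_t; // hypothetical
static int cmp_addr(const void *a, const void *b) {
    const block_t *x = a, *y = b;
    return (x->addr > y->addr) - (x->addr < y->addr);
}
static void mark_weak(block_t *blocks, size_t n) {
    qsort(blocks, n, sizeof(*blocks), cmp_addr);
    char *end = NULL; // end of the current outer block
    for (size_t i = 0; i < n; ++i) {
        if (end != NULL && blocks[i].addr < end) {
            blocks[i].weak = 1; // starts inside an outer block => weak
        } else {
            end = blocks[i].addr + blocks[i].size; // a new outer block begins
        }
    }
}
\endcode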
It would be nice to reuse the same handler pointers we used for strings.
We must not (or at least shouldn't) ask the handler twice about the same object during the same operation.
E.g. to make a copy of an object we need to know the size of the object in advance.
That's fine for all types except flexible arrays, and we don't know at the struct level whether we have a flexible array.
The generic way is to build the value hierarchy and, in parallel, track all pointers and the corresponding arrays they point to.
We also need to calculate the size of each value. Ideally we get the length at the base_type/enum level and accumulate it on each level up. That will
let us solve the issue with flexible arrays. We may also potentially detect overlap for cases where a struct with a flexible array reports a bigger
length and we have that struct in an array with count > 1 - though that may even be intended behavior in some cases.
So we build that hierarchy with extra data (each child provides: value, actual length, ... pointer to its list of children),
plus we need to track pointers, and that will be an additional piece of data:
value_map {
    hashmap[address] -> struct {pointer_value, resulting_array_value, size of the block (if any), weak flag (calculated later)};
    value_data, size, children -> ...
}
For delete we will just find the outer blocks, delete all of them and clean up all the data.
For copy we'll need to make a copy of all blocks with the same sizes and, using the map, replicate the same decisions based on the handler results
(new_value_from_existing_with_different_addr?? -> new_value(get_entry(old), new address)).
We may build this structure in a separate internal function and feed its data to the other functions, e.g. copy and delete.
Summary:
1. each function can have different behavior traits; we may want to support them:
1.1. compare modes: values only (don't pay attention to structure) vs. compare with structure awareness.
1.2. delete - we really do want it to be structure-aware always.
1.3. copy modes: strict (structure-aware, creates a full copy) vs. data only (treat all pointers as strong). The latter
creates a Tree instead of a DAG with weak pointers. In some cases this may be the desired behavior: e.g. we
may want to copy a DAG with weak pointers into a tree and delete it after that. delete will always be structure-aware.
The compare op will return true or false based on the mode.
For non-structure-aware traits we don't need to build a hashmap.
For building the map we also have 3 options for how to treat pointers:
1. fail on void* and declaration-only types (if the handler didn't resolve them)
2. fallback (we won't be able to copy those, but we can keep the same values, call delete and compare the value of the pointer itself)
3. ignore those pointers (don't do anything with them).
The sketch right after this list shows those knobs combined into a mode value.
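A minimal illustration, with the fields mirroring metac_value_memory_map_mode_t as it is used in this file (the values are just one possible combination):
\code
metac_value_memory_map_mode_t mode = {
    .memory_block_mode = METAC_MMODE_dag,  // structure-aware: track shared blocks
    .flex_array_mode = METAC_FLXARR_fail,  // no handler for a flexible array -> fail
    .union_mode = METAC_UNION_fail,        // no handler to pick the union member -> fail
    .unknown_ptr_mode = METAC_UPTR_fail,   // void* / declaration-only pointer -> fail
};
struct metac_memory_map *p_map = metac_new_value_memory_map_ex(p_val, &mode, p_tag_map);
\endcode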
We want to deliver the underlying function that builds this data as part of the API;
it may be used for further implementations such as JSON serialization.
There are more considerations if we now speak about values with parameters
(list of args and result of a function).
We may want to support them, but in order to do so we need to make sure that
value_memory_map supports that.
It seems we always create struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, 0);
even for base_types,
so our memory entry will hold a value whose type has parameters.
We'll need to filter those things out. We can also provide sz as 0, or we need to create a flag to
mark such a pseudo entry.
*/
/* get n'th parent (to work with loop_level) */
struct metac_memory_entry * metac_memory_entry_parent(struct metac_memory_entry * p_memory_entry, int n) {
if (n < 0) {
return NULL;
}
while (n > 0) {
if (p_memory_entry->p_parent == NULL) {
return NULL;
}
p_memory_entry = p_memory_entry->p_parent;
--n;
}
return p_memory_entry;
}
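/* A minimal usage sketch: metac_memory_entry_parent(p_entry, 0) returns p_entry itself,
   metac_memory_entry_parent(p_entry, 1) its direct parent, and NULL when n is negative
   or the parent chain is shorter than n. */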
struct metac_memory_entry * metac_new_memory_entry(metac_value_t * p_val, metac_size_t byte_size, metac_num_t children_count) {
struct metac_memory_entry * p_memory_entry = calloc(1, sizeof(*p_memory_entry));
if (p_memory_entry == NULL) {
return NULL;
}
if (children_count > 0) {
p_memory_entry->pp_children = calloc(children_count, sizeof(*p_memory_entry->pp_children));
if (p_memory_entry->pp_children == NULL) {
free(p_memory_entry);
return NULL;
}
p_memory_entry->children_count = children_count;
}
p_memory_entry->p_val = p_val;
p_memory_entry->byte_size = byte_size;
return p_memory_entry;
}
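/* Iterative (non-recursive) deletion of a memory-entry tree: on METAC_R_ITER_start the
   entry's value is deleted and its children are scheduled; state 1 runs once all children
   are gone and frees the containers. */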
void metac_memory_entry_delete(struct metac_memory_entry * p_memory_entry) {
if (p_memory_entry == NULL) {
return;
}
metac_recursive_iterator_t * p_iter = metac_new_recursive_iterator(p_memory_entry);
for (struct metac_memory_entry * p = (struct metac_memory_entry *)metac_recursive_iterator_next(p_iter); p != NULL;
p = (struct metac_memory_entry *)metac_recursive_iterator_next(p_iter)) {
int state = metac_recursive_iterator_get_state(p_iter);
switch (state) {
case METAC_R_ITER_start: {
if (p->p_val != NULL) {
metac_value_delete(p->p_val);
}
if (p->children_count == 0) {
assert(p->pp_children == NULL);
free(p);
metac_recursive_iterator_done(p_iter, NULL);
continue;
}
for (metac_num_t i = 0; i < p->children_count; ++i) {
if (p->pp_children[i] != NULL) {
metac_recursive_iterator_create_and_append_dep(p_iter, p->pp_children[i]);
}
}
metac_recursive_iterator_set_state(p_iter, 1);
continue;
}
case 1: {
while(metac_recursive_iterator_dep_queue_is_empty(p_iter) == 0) {
metac_recursive_iterator_dequeue_and_delete_dep(p_iter, NULL, NULL);
}
free(p->pp_children);
free(p);
metac_recursive_iterator_done(p_iter, NULL);
continue;
}
}
}
metac_recursive_iterator_free(p_iter);
}
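/* Note: entries are normally deleted through their owning map - metac_value_memory_map_delete
   below calls metac_memory_entry_delete on p_top_entry. */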
static int addr_ptr_key_hash(void* key) {
return hashmapHash(&key, sizeof(void*));
}
static bool addr_ptr_key_equals(void* keyA, void* keyB) {
return keyA == keyB;
}
void metac_value_memory_map_delete(struct metac_memory_map * p_map) {
if (p_map->p_addr_ptr_val_to_memory_entry != NULL) {
hashmapFree(p_map->p_addr_ptr_val_to_memory_entry);
}
if (p_map->p_top_entry != NULL) {
metac_memory_entry_delete(p_map->p_top_entry);
}
free(p_map);
}
struct _store_hash_map_entry_context {
metac_num_t len;
metac_num_t current;
void ** p_keys;
};
/* callback for hashmapForEach */
static bool _store_hash_map_entry(void* key, void* value, void* context) {
assert(context != NULL);
struct _store_hash_map_entry_context * p_context = (struct _store_hash_map_entry_context *)context;
assert(p_context->current < p_context->len);
if (!(p_context->current < p_context->len)) {
return false;
}
p_context->p_keys[p_context->current] = key;
++p_context->current;
return true;
}
/* callback for qsort */
static int _sort_addr_(const void *a, const void *b) {
void* key_a = *(void**)a;
void* key_b = *(void**)b;
if (key_a < key_b) return -1;
if (key_a == key_b) return 0;
return 1;
}
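/* Sweep over all pointed-to blocks, sorted by address, and compute offset_from_allocate_base:
   an entry that starts inside another (outer) block gets a non-zero offset and is therefore
   weak; only entries with offset 0 are treated as separately allocatable.
   Returns 0 on success, -ENOMEM on allocation failure. */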
static int _identify_allocatable_entries(struct metac_memory_map * p_map) {
if (p_map->p_addr_ptr_val_to_memory_entry == NULL) {
return 0;
}
struct _store_hash_map_entry_context context = {.len = hashmapSize(p_map->p_addr_ptr_val_to_memory_entry), .current = 0, .p_keys = NULL};
if (context.len == 0) {
return 0;
}
context.p_keys = calloc(context.len, sizeof(*context.p_keys));
if (context.p_keys == NULL) {
return -(ENOMEM);
}
hashmapForEach(p_map->p_addr_ptr_val_to_memory_entry, _store_hash_map_entry, &context);
assert(context.current == context.len);
/* sort keys for sweep line algorithm */
qsort(context.p_keys, context.len, sizeof(*context.p_keys), _sort_addr_);
/* sweep line*/
void *p_main = NULL;
struct metac_memory_entry * p_main_entry = NULL;
for (metac_size_t i = 0; i < context.len; ++i) {
void *p_current = context.p_keys[i];
struct metac_memory_entry * p_current_entry = hashmapGet(p_map->p_addr_ptr_val_to_memory_entry, p_current);
assert(p_current_entry != NULL);
if (p_current_entry->byte_size <= 0) {
// artificial elements, like param/function loads
continue;
}
if (p_main == NULL) {
p_main = p_current;
p_main_entry = p_current_entry;
continue;
}
if (p_main + p_main_entry->byte_size <= p_current) {
p_main = p_current;
p_main_entry = p_current_entry;
continue;
}
if (p_main + p_main_entry->byte_size < p_current + p_current_entry->byte_size) {
metac_size_t delta = p_current + p_current_entry->byte_size - (p_main + p_main_entry->byte_size);
printf("WARNING: partial overlap %p:%"METAC_SIZE_PRIx" with %p:%"METAC_SIZE_PRIx
". should we extend the first size by %"METAC_SIZE_PRIx"\n",
p_main, p_main_entry->byte_size, p_current, p_current_entry->byte_size,
delta);
}
p_current_entry->offset_from_allocate_base = p_current - p_main;
}
free(context.p_keys);
return 0;
}
/* default mode - failsafe */
static metac_value_memory_map_mode_t const _default_value_memory_map_mode = {
.memory_block_mode = METAC_MMODE_dag,
.flex_array_mode = METAC_FLXARR_fail,
.union_mode = METAC_UNION_fail,
.unknown_ptr_mode = METAC_UPTR_fail,
};
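/* Builds a metac_memory_map for p_val by walking it with the recursive iterator.
   Each value is handled as a small FSM: METAC_R_ITER_start expands the value into child
   values (members, elements, pointees), state 1 assembles the finished children into a
   metac_memory_entry, and state 2 is the failure-cleanup path. */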
struct metac_memory_map * metac_new_value_memory_map_ex(
metac_value_t * p_val,
metac_value_memory_map_mode_t * p_map_mode,
metac_tag_map_t * p_tag_map) {
if (p_map_mode == NULL) {
p_map_mode = (metac_value_memory_map_mode_t *)&_default_value_memory_map_mode;
}
p_val = metac_new_value_from_value(p_val);
if (p_val == NULL) {
return NULL;
}
struct metac_memory_map *p_map = calloc(1, sizeof(*p_map));
if (p_map == NULL) {
return NULL;
}
if (p_map_mode->memory_block_mode == METAC_MMODE_dag) {
p_map->p_addr_ptr_val_to_memory_entry = hashmapCreate(1024, addr_ptr_key_hash, addr_ptr_key_equals);
if (p_map->p_addr_ptr_val_to_memory_entry == NULL) {
free(p_map);
return NULL;
}
}
metac_recursive_iterator_t * p_iter = metac_new_recursive_iterator(p_val);
for (metac_value_t * p = (metac_value_t *)metac_recursive_iterator_next(p_iter); p != NULL;
p = (metac_value_t *)metac_recursive_iterator_next(p_iter)) {
int state = metac_recursive_iterator_get_state(p_iter);
metac_kind_t final_kind;
if (metac_value_kind(p) != METAC_KND_func_parameter || metac_value_has_parameter_load(p) == 0) {
final_kind = metac_value_final_kind(p, NULL);
} else {
final_kind = METAC_KND_func_parameter; // unspecified and va_arg;
}
switch(final_kind) {
case METAC_KND_enumeration_type:
case METAC_KND_base_type: {
metac_size_t sz = 0;
if (metac_entry_byte_size(metac_value_entry(p), &sz) != 0) {
metac_recursive_iterator_fail(p_iter);
continue;
}
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, 0);
if (p_memory_entry == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
//case METAC_KND_class_type:
case METAC_KND_union_type:
case METAC_KND_struct_type:
case METAC_KND_array_type: {
switch(state) {
case METAC_R_ITER_start: {
metac_size_t sz = 0; // the initial size; it may be updated in the next state if we find a flex array
metac_num_t children_count = 0;
metac_flag_t failure = 0;
if (final_kind != METAC_KND_array_type) {
assert(metac_value_has_members(p) != 0);
if (metac_entry_byte_size(metac_value_entry(p), &sz) != 0) {
metac_recursive_iterator_fail(p_iter);
continue;
}
metac_num_t mcount = metac_value_member_count(p);
if (final_kind == METAC_KND_union_type) {
if (mcount > 0) {
if (p_tag_map != NULL) {
metac_value_event_t ev = {.type = METAC_RQVST_union_member, .p_return_value = NULL};
metac_entry_tag_t * p_tag = metac_tag_map_tag(p_tag_map, metac_value_entry(p));
if (p_tag != NULL && p_tag->handler) {
if (metac_value_event_handler_call(p_tag->handler, p_iter, &ev, p_tag->p_context) != 0) {
metac_recursive_iterator_fail(p_iter);
continue;
}
if (ev.p_return_value != NULL) {
metac_recursive_iterator_create_and_append_dep(p_iter, ev.p_return_value);
children_count = 1;
}
}
}
if (children_count == 0 && p_map_mode->union_mode == METAC_UNION_fail) {
failure = 1;
break;
}
}
} else { /* structs, classes must go over all members */
for (metac_num_t i = 0; i < mcount; ++i) {
metac_value_t * p_memb_val = metac_new_value_by_member_id(p, i);
if (p_memb_val == NULL) {
failure = 1;
break;
}
metac_recursive_iterator_create_and_append_dep(p_iter, p_memb_val);
++children_count;
}
}
if (failure != 0) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, children_count);
if (p_memory_entry == NULL) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
metac_recursive_iterator_set_context(p_iter, p_memory_entry);
metac_recursive_iterator_set_state(p_iter, 1);
continue;
} else { // final_kind == METAC_KND_array_type
assert(metac_value_has_elements(p) != 0);
metac_value_t * p_local = p;
metac_value_t * p_non_flexible = NULL;
if (metac_value_element_count_flexible(p) != 0) {
if (p_tag_map != NULL) {
metac_value_event_t ev = {.type = METAC_RQVST_flex_array_count, .p_return_value = NULL};
metac_entry_tag_t * p_tag = metac_tag_map_tag(p_tag_map, metac_value_entry(p));
if (p_tag != NULL && p_tag->handler) {
if (metac_value_event_handler_call(p_tag->handler, p_iter, &ev, p_tag->p_context) != 0) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
p_non_flexible = ev.p_return_value;
}
}
if (p_non_flexible == NULL) { /* this is a way to skip that item */
if (p_map_mode->flex_array_mode == METAC_FLXARR_fail) {
metac_recursive_iterator_fail(p_iter);
continue;
}
p_non_flexible = metac_new_element_count_value(p, 0); /* generate {} - effectively ignore; a safe default */
}
if (p_non_flexible == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
p_local = p_non_flexible;
}
metac_num_t mcount = metac_value_element_count(p_local);
for (metac_num_t i = 0; i < mcount; ++i) {
metac_value_t * p_el_val = metac_new_value_by_element_id(p_local, i);
if (p_el_val == NULL) {
failure = 1;
break;
}
metac_recursive_iterator_create_and_append_dep(p_iter, p_el_val);
++children_count;
}
if (children_count > 0 && metac_entry_byte_size(metac_value_entry(p_local), &sz) != 0) {
failure = 1;
}
if (p_non_flexible) {
metac_value_delete(p_non_flexible);
p_local = p;
}
if (failure != 0) {
metac_recursive_iterator_set_state(p_iter, 2); /* failure cleanup */
continue;
}
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, children_count);
if (p_memory_entry == NULL) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
metac_recursive_iterator_set_context(p_iter, p_memory_entry);
metac_recursive_iterator_set_state(p_iter, 1);
continue;
}
}
case 1: {
struct metac_memory_entry * p_memory_entry = (struct metac_memory_entry *)metac_recursive_iterator_get_context(p_iter);
assert(p_memory_entry != NULL);
metac_num_t children_count = 0;
metac_flag_t failure = 0;
while(metac_recursive_iterator_dep_queue_is_empty(p_iter) == 0) {
metac_value_t * p_memb_val = NULL;
struct metac_memory_entry * p_member_memory_entry = metac_recursive_iterator_dequeue_and_delete_dep(p_iter, (void**)&p_memb_val, NULL);
if (p_member_memory_entry == NULL) {
if (p_memb_val != NULL) {
metac_value_delete(p_memb_val);
}
failure = 1;
break;
}
// check if member offset+size goes beyond our size
assert(metac_value_addr(p_member_memory_entry->p_val) >= metac_value_addr(p_memory_entry->p_val));
size_t offset = metac_value_addr(p_member_memory_entry->p_val) - metac_value_addr(p_memory_entry->p_val);
if (p_memory_entry->byte_size < offset + p_member_memory_entry->byte_size) {
// grow to account for a flexible member
p_memory_entry->byte_size = offset + p_member_memory_entry->byte_size;
}
p_memory_entry->pp_children[children_count] = p_member_memory_entry;
p_member_memory_entry->p_parent = p_memory_entry;
++children_count;
}
if (failure != 0) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
case 2:
default: {
/* failure cleanup*/
while(metac_recursive_iterator_dep_queue_is_empty(p_iter) == 0) {
metac_value_t * p_memb_val = NULL;
struct metac_memory_entry * p_member_memory_entry = metac_recursive_iterator_dequeue_and_delete_dep(p_iter, (void**)&p_memb_val, NULL);
if (p_member_memory_entry == NULL) {
if (p_memb_val != NULL) {
metac_value_delete(p_memb_val);
}
}else {
metac_memory_entry_delete(p_member_memory_entry);
}
}
struct metac_memory_entry * p_memory_entry = (struct metac_memory_entry *)metac_recursive_iterator_get_context(p_iter);
if (p_memory_entry != NULL) {
// p itself must still exist if we fail
if (p_memory_entry->p_val == p) {
p_memory_entry->p_val = NULL;
}
metac_memory_entry_delete(p_memory_entry);
}
metac_recursive_iterator_fail(p_iter);
continue;
}
}
}
case METAC_KND_pointer_type: {
switch(state) {
case METAC_R_ITER_start: {
int loop_level = 0;
metac_size_t sz = 0;
if (metac_entry_byte_size(metac_value_entry(p), &sz) != 0) {
metac_recursive_iterator_fail(p_iter);
continue;
}
/* simple case 1 - NULL pointer */
void *p_addr = NULL;
if (metac_value_pointer(p, &p_addr) != 0) {
metac_recursive_iterator_fail(p_iter);
continue;
}
if (p_addr == NULL) {
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, 0);
if (p_memory_entry == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
/* simple case 2 - if we are in the loop - children will be created on the parent level */
loop_level = metac_value_level_introduced_loop(p_iter);
if (loop_level > 0) {
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, 0);
if (p_memory_entry == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
p_memory_entry->loop_level = loop_level;
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
/* generic case - we need to convert pointer to array (can be flexible), try handler first */
metac_value_t * p_arr_val = NULL;
if (p_tag_map != NULL) {
metac_value_event_t ev = {.type = METAC_RQVST_pointer_array_count, .p_return_value = NULL};
metac_entry_tag_t * p_tag = metac_tag_map_tag(p_tag_map, metac_value_entry(p));
if (p_tag != NULL && p_tag->handler) {
if (metac_value_event_handler_call(p_tag->handler, p_iter, &ev, p_tag->p_context) != 0) {
metac_recursive_iterator_fail(p_iter);
continue;
}
p_arr_val = ev.p_return_value;
}
}
if (p_arr_val == NULL && (metac_value_is_void_pointer(p) != 0 || metac_value_is_declaration_pointer(p) != 0)) {
switch(p_map_mode->unknown_ptr_mode) {
case METAC_UPTR_fail: {
metac_recursive_iterator_fail(p_iter);
continue;
}
case METAC_UPTR_as_values:
case METAC_UPTR_ignore: { /* FALLBACK to the simple case 1 */
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, 0);
if (p_memory_entry == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
case METAC_UPTR_as_one_byte_ptr: {
static struct metac_entry unsigned_char = { /* static: it is referenced outside this scope as the array element type */
.kind = METAC_KND_base_type,
.name = "unsigned char",
.parents_count = 0, /* warning: no parent */
.base_type_info = {
.byte_size = 1,
.encoding = METAC_ENC_unsigned_char,
},
};
struct metac_entry ptr = { /* CAUTION: this emulates a static, but we immediately delete p_new_ptr_val */
.kind = METAC_KND_pointer_type,
.parents_count = 0,
.pointer_type_info = {
.byte_size = sizeof(void*),
.type = &unsigned_char,
},
};
metac_value_t *p_new_ptr_val = metac_new_value(&ptr, metac_value_addr(p));
if (p_new_ptr_val == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
p_arr_val = metac_new_element_count_value(p_new_ptr_val, 1);
metac_value_delete(p_new_ptr_val);
if (p_arr_val == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
}
}
}
if (p_arr_val == NULL) {
p_arr_val = metac_new_element_count_value(p, 1); /* create an array of length 1 - a good default */
}
if (p_arr_val == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
// make sure we really have an array
if (metac_value_has_elements(p_arr_val) == 0) {
metac_value_delete(p_arr_val);
metac_recursive_iterator_fail(p_iter);
continue;
}
/* finally we have p_arr_val which will be the child */
metac_recursive_iterator_create_and_append_dep(p_iter, p_arr_val);
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, sz, 1);
if (p_memory_entry == NULL) {
metac_recursive_iterator_fail(p_iter);
continue;
}
metac_recursive_iterator_set_context(p_iter, p_memory_entry);
metac_recursive_iterator_set_state(p_iter, 1);
continue;
}
case 1: {
struct metac_memory_entry * p_memory_entry = (struct metac_memory_entry *)metac_recursive_iterator_get_context(p_iter);
assert(p_memory_entry != NULL);
metac_value_t * p_memb_val = NULL;
struct metac_memory_entry * p_member_memory_entry = metac_recursive_iterator_dequeue_and_delete_dep(p_iter, (void**)&p_memb_val, NULL);
if (p_member_memory_entry == NULL) {
if (p_memb_val != NULL) {
metac_value_delete(p_memb_val);
}
if (p_memory_entry != NULL) {
// p itself must still exist if we fail
if (p_memory_entry->p_val == p) {
p_memory_entry->p_val = NULL;
}
metac_memory_entry_delete(p_memory_entry);
}
metac_recursive_iterator_fail(p_iter);
continue;
}
p_memory_entry->pp_children[0] = p_member_memory_entry;
p_member_memory_entry->p_parent = p_memory_entry;
/* if in DAG mode - also record in the hashmap the new entries that pointers point to */
if (p_map->p_addr_ptr_val_to_memory_entry != NULL) {
void * addr = metac_value_addr(p_member_memory_entry->p_val);
if (hashmapContainsKey(p_map->p_addr_ptr_val_to_memory_entry, addr) != 0) {
/* compare sizes and update if we have bigger */
struct metac_memory_entry * p_prev_member_memory_entry =
(struct metac_memory_entry *)hashmapGet(p_map->p_addr_ptr_val_to_memory_entry, addr);
if (p_member_memory_entry->byte_size > p_prev_member_memory_entry->byte_size) {
if (hashmapPut(p_map->p_addr_ptr_val_to_memory_entry, addr, (void*)p_member_memory_entry, NULL)!=0) {
// p itself must still exist if we fail
if (p_memory_entry->p_val == p) {
p_memory_entry->p_val = NULL;
}
metac_memory_entry_delete(p_memory_entry);
metac_recursive_iterator_fail(p_iter);
continue;
}
}
} else {
if (hashmapPut(p_map->p_addr_ptr_val_to_memory_entry, addr, (void*)p_member_memory_entry, NULL)!=0) {
// p itself must still exist if we fail
if (p_memory_entry->p_val == p) {
p_memory_entry->p_val = NULL;
}
metac_memory_entry_delete(p_memory_entry);
metac_recursive_iterator_fail(p_iter);
continue;
}
}
}
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
}
}
case METAC_KND_func_parameter: // only when it's an unspecified param; TODO: we also need va_arg here
case METAC_KND_subroutine_type:
case METAC_KND_subprogram: {
switch(state) {
case METAC_R_ITER_start: {
metac_flag_t failure = 0;
assert(metac_value_has_parameter_load(p) != 0);
metac_num_t children_count = 0;
metac_num_t mcount = metac_value_parameter_count(p);
children_count += mcount;
for (metac_num_t i = 0; i < mcount; ++i) {
// we're making a copy because the memory map deletes all values
metac_value_t * p_param_val = metac_value_parameter_new_item(p, i);
if (p_param_val == NULL) {
failure = 1;
break;
}
metac_recursive_iterator_create_and_append_dep(p_iter, p_param_val);
}
if (failure != 0) {
metac_recursive_iterator_set_state(p_iter, 2); /* failure cleanup */
continue;
}
// TODO: if there is a result, add it as the last child
struct metac_memory_entry * p_memory_entry = metac_new_memory_entry(p, 0, children_count);
if (p_memory_entry == NULL) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
metac_recursive_iterator_set_context(p_iter, p_memory_entry);
metac_recursive_iterator_set_state(p_iter, 1);
continue;
}
case 1: {
struct metac_memory_entry * p_memory_entry = (struct metac_memory_entry *)metac_recursive_iterator_get_context(p_iter);
assert(p_memory_entry != NULL);
metac_num_t children_count = 0;
metac_flag_t failure = 0;
while(metac_recursive_iterator_dep_queue_is_empty(p_iter) == 0) {
metac_value_t * p_memb_val = NULL;
struct metac_memory_entry * p_member_memory_entry = metac_recursive_iterator_dequeue_and_delete_dep(p_iter, (void**)&p_memb_val, NULL);
if (p_member_memory_entry == NULL) {
if (p_memb_val != NULL) {
metac_value_delete(p_memb_val);
}
failure = 1;
break;
}
p_memory_entry->pp_children[children_count] = p_member_memory_entry;
p_member_memory_entry->p_parent = p_memory_entry;
++children_count;
}
if (failure != 0) {
metac_recursive_iterator_set_state(p_iter, 2); // failure cleanup
continue;
}
metac_recursive_iterator_done(p_iter, p_memory_entry);
continue;
}
case 2:
default: {
/* failure cleanup*/
while(metac_recursive_iterator_dep_queue_is_empty(p_iter) == 0) {
metac_value_t * p_memb_val = NULL;
struct metac_memory_entry * p_member_memory_entry = metac_recursive_iterator_dequeue_and_delete_dep(p_iter, (void**)&p_memb_val, NULL);
if (p_member_memory_entry == NULL) {
if (p_memb_val != NULL) {
metac_value_delete(p_memb_val);
}
}else {
metac_memory_entry_delete(p_member_memory_entry);
}
}
struct metac_memory_entry * p_memory_entry = (struct metac_memory_entry *)metac_recursive_iterator_get_context(p_iter);
if (p_memory_entry != NULL) {
// p itself must still exist if we fail
if (p_memory_entry->p_val == p) {
p_memory_entry->p_val = NULL;
}
metac_memory_entry_delete(p_memory_entry);
}
metac_recursive_iterator_fail(p_iter);
continue;
}
}
}
default: {
/*quickly fail if we don't know how to handle*/
metac_recursive_iterator_fail(p_iter);
continue;
}
}
}
p_map->p_top_entry = metac_recursive_iterator_get_out(p_iter, NULL, NULL);
metac_recursive_iterator_free(p_iter);
// if error
if (p_map->p_top_entry == NULL) {
if (p_map->p_addr_ptr_val_to_memory_entry != NULL) {
hashmapFree(p_map->p_addr_ptr_val_to_memory_entry); // the hashmap contains invalid references; when p_map->p_top_entry is NULL the FSM has already freed the data
}
free(p_map);
p_map = NULL;
if (p_val != NULL) {
metac_value_delete(p_val);
p_val = NULL;
}
return NULL;
}
/* p_map is fully built at this point, but in DAG mode not all entries are marked as non-allocatable yet.
it's ok to delete p_map with metac_value_memory_map_delete if needed */
if (_identify_allocatable_entries(p_map) != 0 ) {
metac_value_memory_map_delete(p_map);
p_map = NULL;
}
return p_map;
}
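/* A minimal sketch of consuming a freshly built map (fields as in struct metac_memory_entry above):
   struct metac_memory_map *p_map = metac_new_value_memory_map_ex(p_val, NULL, p_tag_map);
   if (p_map != NULL) {
       struct metac_memory_entry *p_top = p_map->p_top_entry;
       for (metac_num_t i = 0; i < p_top->children_count; ++i) {
           struct metac_memory_entry *p_child = p_top->pp_children[i]; // inspect p_child->p_val, p_child->byte_size, ...
       }
       metac_value_memory_map_delete(p_map);
   }
*/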
struct _metac_value_free_context {
void (*free_fn)(void *ptr);
};
/* callback for hashmapForEach */
static bool _value_free(void* key, void* value, void* context) {
assert(context != NULL);
struct _metac_value_free_context * p_context = (struct _metac_value_free_context *)context;
void* ptr = key;
struct metac_memory_entry *p_ptr_entry = (struct metac_memory_entry *)value;
if (p_ptr_entry->offset_from_allocate_base == 0) { /* free non-weak (outer) blocks only */
if (p_context->free_fn != NULL) {
p_context->free_fn(ptr);
}
}
return true;
}
metac_flag_t metac_value_free_ex(metac_value_t * p_val,
metac_value_memory_map_non_handled_mode_t * p_mode,
void (*free_fn)(void *ptr),
metac_tag_map_t * p_tag_map) {
if (free_fn == NULL) {
free_fn = free;
}
struct metac_memory_map * p_map = metac_new_value_memory_map_ex(p_val,
(metac_value_memory_map_mode_t[]){{
.memory_block_mode = METAC_MMODE_dag /* delete won't work in tree mode */,
.unknown_ptr_mode = (p_mode != NULL)?p_mode->unknown_ptr_mode:METAC_UPTR_fail,
.union_mode = (p_mode != NULL)?p_mode->union_mode:METAC_UNION_fail,
.flex_array_mode = (p_mode != NULL)?p_mode->flex_array_mode:METAC_FLXARR_fail,
}},
p_tag_map);
if (p_map == NULL) {
return 0;
}
struct _metac_value_free_context context = {.free_fn = free_fn,};
hashmapForEach(p_map->p_addr_ptr_val_to_memory_entry, _value_free, &context);
metac_value_memory_map_delete(p_map);
return 1;
}
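/* A minimal usage sketch with the defaults (free() and the fail-fast modes):
   if (metac_value_free_ex(p_val, NULL, NULL, p_tag_map) == 0) {
       // nothing was freed: the memory map could not be built
   }
*/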
struct _metac_value_copy_context {
void *(*calloc_fn)(size_t nmemb, size_t size);
void (*free_fn)(void *ptr);
struct metac_memory_map * p_map;
Hashmap * src_to_dst;
bool preallocation_failed;
};
static bool _value_copy_preallocate(void* key, void* value, void* context) {
assert(context != NULL);
struct _metac_value_copy_context * p_context = (struct _metac_value_copy_context *)context;
if (p_context->calloc_fn == NULL || p_context->src_to_dst == NULL) {
p_context->preallocation_failed = 1;
return false;
}
struct metac_memory_entry *p_ptr_entry = (struct metac_memory_entry *)value;
if (p_ptr_entry->offset_from_allocate_base == 0) { /* preallocate non-weak (outer) blocks only */
void * dst = p_context->calloc_fn(1, p_ptr_entry->byte_size);
if (dst == NULL) {
p_context->preallocation_failed = 1;
return false;
}
if (hashmapPut(p_context->src_to_dst, key, dst, NULL) != 0) {
free(dst);
p_context->preallocation_failed = 1;
return false;
}
}
return true;
}
static bool _value_copy_free(void* key, void* value, void* context) {
assert(context != NULL);
struct _metac_value_copy_context * p_context = (struct _metac_value_copy_context *)context;
assert(p_context->free_fn != NULL);
if (p_context->free_fn == NULL) {
return false;
}
if (value != NULL) {
p_context->free_fn(value);
}
return true;
}
/* copy src to dst (overwriting its data); be careful: dst must have enough space.
don't use a flexible array directly - instead make the value a pointer to that flexible array, so
the function will create a pointer and make a deep copy of everything it points to.
returns dst in case of success. */
metac_value_t *metac_value_copy_ex(metac_value_t * p_src_val, metac_value_t * p_dst_val,
metac_value_memory_map_mode_t * p_mode,
void *(*calloc_fn)(size_t nmemb, size_t size),
void (*free_fn)(void *ptr), /* free used in case of failure */
metac_tag_map_t * p_tag_map) {
/*set some defaults*/
if (p_mode == NULL) {
p_mode = (metac_value_memory_map_mode_t *)&_default_value_memory_map_mode;
}
if (calloc_fn == NULL) {
calloc_fn = calloc;
}
if (free_fn == NULL) {
free_fn = free;
}