@@ -160,7 +160,8 @@ gpujpeg_get_devices_info(void)
 #endif
     }
 #else
-    // TODO: NEED IMPLEMENTATION
+    // TODO: NEED IMPLEMENTATION
+    printf("[WARNING] gpujpeg_get_devices_info(): NOT YET IMPLEMENTED\n");
 #endif
     return devices_info;
 }
@@ -261,7 +262,8 @@ gpujpeg_init_device(int device_id, int flags)
         return -1;
     }
 #else
-    // TODO: NEED IMPLEMENTATION
+    // TODO: NEED IMPLEMENTATION
+    printf("[WARNING] gpujpeg_init_device(): NOT YET IMPLEMENTED\n");
 #endif
     return 0;
 }
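A small consistency note on the warnings added in this and the following hunks: they go to stdout via printf(), while the error paths elsewhere in this file (for example the fread failure further down) report through fprintf(stderr, ...). If the stubs are meant to behave like the other diagnostics, routing them to stderr would be a one-line change per call site; the message prefix below is only an illustration:

    fprintf(stderr, "[GPUJPEG] [Warning] gpujpeg_init_device(): NOT YET IMPLEMENTED\n");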
@@ -440,6 +442,7 @@ void gpujpeg_set_device(int index)
     cudaSetDevice(index);
 #else
     // TODO: NEED IMPLEMENTATION
+    printf("[WARNING] gpujpeg_set_device(): NOT YET IMPLEMENTED\n");
 #endif
 }

@@ -463,7 +466,8 @@ gpujpeg_component_print8(struct gpujpeg_component* component, uint8_t* d_data)
     cudaFreeHost(data);
 #else
     // TODO: NEED IMPLEMENTATION
-#endif
+    printf("[WARNING] gpujpeg_component_print8(): NOT YET IMPLEMENTED\n");
+#endif
 }

 /* Documented at declaration */
@@ -486,7 +490,8 @@ gpujpeg_component_print16(struct gpujpeg_component* component, int16_t* d_data)
     cudaFreeHost(data);
 #else
     // TODO: NEED IMPLEMENTATION
-#endif
+    printf("[WARNING] gpujpeg_component_print16(): NOT YET IMPLEMENTED\n");
+#endif
 }

 /* Documented at declaration */
@@ -508,6 +513,7 @@ gpujpeg_coder_init(struct gpujpeg_coder * coder)
     }
 #else
     // TODO: NEED IMPLEMENTATION
+    printf("[WARNING] gpujpeg_coder_init(): NOT YET IMPLEMENTED\n");
 #endif
     // Initialize coder for no image
     coder->param.quality = -1;
@@ -594,6 +600,18 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
         gpujpeg_cuda_check_error("Coder color component device allocation", return 0);
 #else
         // TODO: NEED IMPLEMENTATION
+        // (Re)allocate color components in host memory
+        if (coder->component != NULL) {
+            free(coder->component);
+            coder->component = NULL;
+        }
+        coder->component = (struct gpujpeg_component *) malloc(param_image->comp_count * sizeof(struct gpujpeg_component));
+        // (Re)allocate color components in device memory
+        if (coder->d_component != NULL) {
+            free(coder->d_component);
+            coder->d_component = NULL;
+        }
+        coder->d_component = (struct gpujpeg_component *) malloc(param_image->comp_count * sizeof(struct gpujpeg_component));
 #endif
         coder->component_allocated_size = param_image->comp_count;
     }
@@ -738,6 +756,19 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
         gpujpeg_cuda_check_error("Coder segment device allocation", return 0);
 #else
         // TODO: NEED IMPLEMENTATION
+        // (Re)allocate segments in host memory
+        if (coder->segment != NULL) {
+            free(coder->segment);
+            coder->segment = NULL;
+        }
+        coder->segment = (struct gpujpeg_segment *) malloc(coder->segment_count * sizeof(struct gpujpeg_segment));
+
+        // (Re)allocate segments in device memory
+        if (coder->d_segment != NULL) {
+            free(coder->d_segment);
+            coder->d_segment = NULL;
+        }
+        coder->d_segment = (struct gpujpeg_segment *) malloc(coder->segment_count * sizeof(struct gpujpeg_segment));
 #endif
         coder->segment_allocated_size = coder->segment_count;
     }
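The free-then-malloc sequence introduced here repeats for every buffer pair in the non-CUDA branches of this file (components, segments, and the data, Huffman, and block-list buffers in the hunks below). A small helper along these lines could collapse each block to a single call. This is only a sketch, not part of the commit; the function name is invented, and callers still need to handle a NULL return on allocation failure:

    /* Sketch only: release the old buffer, if any, and hand back a fresh
     * allocation of the requested size. free(NULL) is a no-op, so no NULL
     * check is needed before freeing. */
    static void *
    gpujpeg_host_realloc_buffer(void *old_ptr, size_t size)
    {
        free(old_ptr);
        return malloc(size);
    }

With it, the segment branch above would reduce to
coder->segment = gpujpeg_host_realloc_buffer(coder->segment, coder->segment_count * sizeof(struct gpujpeg_segment));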
@@ -870,6 +901,27 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
         gpujpeg_cuda_check_error("Coder quantized data device allocation", return 0);
 #else
         // TODO: NEED IMPLEMENTATION
+        // (Re)allocate preprocessor data in device memory
+        if (coder->d_data != NULL) {
+            free(coder->d_data);
+            coder->d_data = NULL;
+        }
+        coder->d_data = (uint8_t *) malloc((coder->data_size + idct_overhead) * sizeof(uint8_t));
+
+        // (Re)allocate DCT and quantizer data in host memory
+        if (coder->data_quantized != NULL) {
+            free(coder->data_quantized);
+            coder->data_quantized = NULL;
+        }
+        coder->data_quantized = (int16_t *) malloc(coder->data_size * sizeof(int16_t));
+
+        // (Re)allocate DCT and quantizer data in device memory
+        if (coder->d_data_quantized != NULL) {
+            free(coder->d_data_quantized);
+            coder->d_data_quantized = NULL;
+        }
+
+        coder->d_data_quantized = (int16_t *) malloc((coder->data_size + idct_overhead) * sizeof(int16_t));
 #endif
         coder->data_allocated_size = coder->data_size + idct_overhead;
     }
@@ -882,7 +934,8 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
         gpujpeg_cuda_check_error("d_data memset failed", return 0);
 #else
         // TODO: NEED IMPLEMENTATION
-#endif
+        memset(coder->d_data, 0, coder->data_size * sizeof(uint8_t));
+#endif
     }

     // Set data buffer to color components
@@ -934,6 +987,26 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
         gpujpeg_cuda_check_error("Huffman temp buffer device allocation", return 0);
 #else
         // TODO: NEED IMPLEMENTATION
+        // (Re)allocate Huffman coder data in host memory
+        if (coder->data_compressed != NULL) {
+            free(coder->data_compressed);
+            coder->data_compressed = NULL;
+        }
+        coder->data_compressed = (uint8_t *) malloc(max_compressed_data_size * sizeof(uint8_t));
+
+        // (Re)allocate Huffman coder data in device memory
+        if (coder->d_data_compressed != NULL) {
+            free(coder->d_data_compressed);
+            coder->d_data_compressed = NULL;
+        }
+        coder->d_data_compressed = (uint8_t *) malloc(max_compressed_data_size * sizeof(uint8_t));
+
+        // (Re)allocate Huffman coder temporary buffer
+        if (coder->d_temp_huffman != NULL) {
+            free(coder->d_temp_huffman);
+            coder->d_temp_huffman = NULL;
+        }
+        coder->d_temp_huffman = (uint8_t *) malloc(max_compressed_data_size * sizeof(uint8_t));
 #endif
         coder->data_compressed_allocated_size = max_compressed_data_size;
     }
@@ -965,6 +1038,19 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
         gpujpeg_cuda_check_error("Coder block list device allocation", return 0);
 #else
         // TODO: NEED IMPLEMENTATION
+        // (Re)allocate list of block indices in host memory
+        if (coder->block_list != NULL) {
+            free(coder->block_list);
+            coder->block_list = NULL;
+        }
+        coder->block_list = (uint64_t *) malloc(coder->block_count * sizeof(*coder->block_list));
+
+        // (Re)allocate list of block indices in device memory
+        if (coder->d_block_list != NULL) {
+            free(coder->d_block_list);
+            coder->d_block_list = NULL;
+        }
+        coder->d_block_list = (uint64_t *) malloc(coder->block_count * sizeof(*coder->d_block_list));
 #endif
         coder->block_allocated_size = coder->block_count;
     }
@@ -1041,7 +1127,15 @@ gpujpeg_coder_init_image(struct gpujpeg_coder * coder, const struct gpujpeg_para
     cudaMemcpyAsync(coder->d_segment, coder->segment, coder->segment_count * sizeof(struct gpujpeg_segment), cudaMemcpyHostToDevice, stream);
     gpujpeg_cuda_check_error("Coder segment copy", return 0);
 #else
-    // TODO: NEED IMPLEMENTATION
+    // TODO: NEED IMPLEMENTATION
+    // Copy components to device memory
+    memcpy(coder->d_component, coder->component, coder->param_image.comp_count * sizeof(struct gpujpeg_component));
+
+    // Copy block lists to device memory
+    memcpy(coder->d_block_list, coder->block_list, coder->block_count * sizeof(*coder->d_block_list));
+
+    // Copy segments to device memory
+    memcpy(coder->d_segment, coder->segment, coder->segment_count * sizeof(struct gpujpeg_segment));
 #endif
     coder->allocated_gpu_memory_size = allocated_gpu_memory_size;

@@ -1098,7 +1192,8 @@ gpujpeg_coder_deinit(struct gpujpeg_coder* coder)
     if ( coder->d_block_list != NULL )
         cudaFree(coder->d_block_list);
 #else
-    // TODO: NEED IMPLEMENTATION
+    // TODO: NEED IMPLEMENTATION
+    printf("[WARNING] gpujpeg_coder_deinit(): NOT YET IMPLEMENTED\n");
 #endif
     GPUJPEG_CUSTOM_TIMER_DESTROY(coder->duration_memory_to, return -1);
     GPUJPEG_CUSTOM_TIMER_DESTROY(coder->duration_memory_from, return -1);
@@ -1143,7 +1238,8 @@ static void *gpujpeg_cuda_malloc_host(size_t size) {
     GPUJPEG_CHECK_EX(cudaMallocHost(&ptr, size), "Could not alloc host pointer", return NULL);
 #else
     // TODO: NEED IMPLEMENTATION
-#endif
+    printf("[WARNING] gpujpeg_cuda_malloc_host(): NOT YET IMPLEMENTED\n");
+#endif
     return ptr;
 }

@@ -1180,7 +1276,12 @@ gpujpeg_image_load_from_file(const char* filename, uint8_t** image, size_t* imag
     }
 #else
     // TODO: NEED IMPLEMENTATION
-#endif
+    data = (uint8_t *) malloc(*image_size * sizeof(uint8_t));
+    if ( *image_size != fread(data, sizeof(uint8_t), *image_size, file) ) {
+        fprintf(stderr, "[GPUJPEG] [Error] Failed to load image data [%zd bytes] from file %s!\n", *image_size, filename);
+        return -1;
+    }
+#endif
     fclose(file);

     *image = data;
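Two details worth noting in the fallback read path above: if fread() comes up short, the early return leaks both data and the open FILE* (the fclose() after #endif is skipped), and %zd is the conversion for a signed ssize_t while *image_size is a size_t, so %zu matches. A tightened version of just the added lines might look like the sketch below; the surrounding CUDA branch is untouched:

    data = (uint8_t *) malloc(*image_size * sizeof(uint8_t));
    if ( data == NULL || *image_size != fread(data, sizeof(uint8_t), *image_size, file) ) {
        fprintf(stderr, "[GPUJPEG] [Error] Failed to load image data [%zu bytes] from file %s!\n", *image_size, filename);
        free(data);     /* free(NULL) is harmless if malloc failed */
        fclose(file);
        return -1;
    }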
@@ -1266,7 +1367,8 @@ gpujpeg_image_destroy(uint8_t* image)
     cudaFreeHost(image);
 #else
     // TODO: NEED IMPLEMENTATION
-#endif
+    printf("[WARNING] gpujpeg_image_destroy(): NOT YET IMPLEMENTED\n");
+#endif
     return 0;
 }

@@ -1684,7 +1786,8 @@ gpujpeg_opengl_texture_register(int texture_id, enum gpujpeg_opengl_texture_type
 #endif
 #else
     // TODO: NEED IMPLEMENTATION
-#endif
+    printf("[WARNING] gpujpeg_opengl_texture_register(): NOT YET IMPLEMENTED\n");
+#endif
 }

 /* Documented at declaration */
@@ -1704,6 +1807,7 @@ gpujpeg_opengl_texture_unregister(struct gpujpeg_opengl_texture* texture)
     cudaFreeHost(texture);
 #else
     // TODO: NEED TO BE IMPLEMENTED
+    printf("[WARNING] gpujpeg_opengl_texture_unregister(): NOT YET IMPLEMENTED\n");
 #endif
 #else
     (void) texture;
@@ -1751,6 +1855,7 @@ gpujpeg_opengl_texture_map(struct gpujpeg_opengl_texture* texture, size_t* data_
     *data_size = d_data_size;
 #else
     // TODO: NEED TO BE IMPLEMENTED
+    printf("[WARNING] gpujpeg_opengl_texture_map(): NOT YET IMPLEMENTED\n");
     (void) data_size;
     GPUJPEG_MISSING_OPENGL(return NULL);
 #endif
@@ -1792,6 +1897,7 @@ gpujpeg_opengl_texture_unmap(struct gpujpeg_opengl_texture* texture)
 #endif
 #else
     // TODO: NEED IMPLEMENTATION
+    printf("[WARNING] gpujpeg_opengl_texture_unmap(): NOT YET IMPLEMENTED\n");
 #endif
 }

@@ -1990,7 +2096,10 @@ float gpujpeg_custom_timer_get_duration(cudaEvent_t start, cudaEvent_t stop) {
     return elapsedTime;
 }
 #else
-// TODO: NEED IMPLEMENTATION
+float gpujpeg_custom_timer_get_duration(float start, float stop) {
+    float elapsedTime = NAN;
+    return elapsedTime;
+}
 #endif

 /* vi: set expandtab sw=4 : */
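The non-CUDA gpujpeg_custom_timer_get_duration() added in the last hunk returns NAN, so timing output is meaningless in that build. If the non-CUDA timer "events" were recorded as millisecond timestamps, a host-side implementation is straightforward. The sketch below assumes POSIX clock_gettime() and that start/stop hold such timestamps, which this commit does not establish; the helper name is illustrative only:

    #include <time.h>

    /* Sketch only: current monotonic time in milliseconds. */
    static float gpujpeg_custom_timer_now_ms(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (float) ts.tv_sec * 1000.0f + (float) ts.tv_nsec / 1.0e6f;
    }

    float gpujpeg_custom_timer_get_duration(float start, float stop) {
        return stop - start;  /* elapsed milliseconds between two recorded timestamps */
    }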