
Commit

remove irrelevant TODOs
irenaby committed Jan 14, 2025
1 parent 90d9dc2 commit e88aaf2
Showing 1 changed file with 0 additions and 5 deletions.
@@ -498,18 +498,13 @@ def __init__(self, unit_test):
 
     def get_resource_utilization(self):
         weights = 17920 * 4 / 8
-        # activation = 4000
         activation = 6176 * 4 / 8
         return ResourceUtilization(weights, activation, total_memory=weights + activation)
 
     def compare(self, quantized_model, float_model, input_x=None, quantization_info: UserInformation = None):
         # verify chosen activation bitwidth config
         holder_layers = get_layers_from_model_by_type(quantized_model, KerasActivationQuantizationHolder)[1:]
         activation_bits = [layer.activation_holder_quantizer.get_config()['num_bits'] for layer in holder_layers]
-        # TODO maxcut: restore activation_bits == [4, 4] and unique_tensor_values=16 when maxcut calculates tensor sizes
-        #  of fused nodes correctly.
-        # TODO: maxcut Test updated but lowered activation ru (how can 4000 enforce 4,4??). Not sure what the fused nodes
-        #  comment is about so I might be missing something. Elad?
         self.unit_test.assertTrue((activation_bits == [4, 4]))
 
         self.verify_quantization(quantized_model, input_x,
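Editor's note on the arithmetic in get_resource_utilization above: this is not part of the commit, only a worked example of the conversion the test's expressions appear to perform, assuming memory is byte-denominated (bytes = number of values * bit width / 8 bits per byte).

# Worked example of the resource-utilization figures (assumption: byte-denominated memory)
weights_bytes = 17920 * 4 / 8                    # 17920 values at 4 bits -> 8960.0 bytes
activation_bytes = 6176 * 4 / 8                  # 6176 values at 4 bits -> 3088.0 bytes
total_bytes = weights_bytes + activation_bytes   # 12048.0 bytes
print(weights_bytes, activation_bytes, total_bytes)

Under this reading, the deleted "# activation = 4000" line was a leftover byte budget that no longer matched the 3088-byte activation figure used by the test.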
