Skip to content

Commit 667f00b

Browse files
authored
Merge pull request #93 from kundajelab/targetlayerwarningfix
Target layer warning fix
2 parents 0888551 + 58945b1 commit 667f00b

File tree

5 files changed

+28
-54
lines changed

5 files changed

+28
-54
lines changed

deeplift.egg-info/PKG-INFO

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
Metadata-Version: 2.1
22
Name: deeplift
3-
Version: 0.6.9.2
3+
Version: 0.6.9.3
44
Summary: DeepLIFT (Deep Learning Important FeaTures)
55
Home-page: https://github.com/kundajelab/deeplift
66
License: UNKNOWN

deeplift/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = '0.6.9.2'
1+
__version__ = '0.6.9.3'

deeplift/conversion/kerasapi_conversion.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,9 @@ def conv2d_conversion(config,
119119
name=("preact_" if len(converted_activation) > 0
120120
else "")+name,
121121
kernel=config[KerasKeys.weights][0],
122-
bias=config[KerasKeys.weights][1],
122+
bias=(config[KerasKeys.weights][1] if
123+
len(config[KerasKeys.weights]) > 1
124+
else np.zeros(config[KerasKeys.weights][0].shape[-1])),
123125
strides=config[KerasKeys.strides],
124126
padding=config[KerasKeys.padding].upper(),
125127
data_format=config[KerasKeys.data_format],
@@ -150,7 +152,9 @@ def conv1d_conversion(config,
150152
name=("preact_" if len(converted_activation) > 0
151153
else "")+name,
152154
kernel=config[KerasKeys.weights][0],
153-
bias=config[KerasKeys.weights][1],
155+
bias=(config[KerasKeys.weights][1] if
156+
len(config[KerasKeys.weights]) > 1
157+
else np.zeros(config[KerasKeys.weights][0].shape[-1])),
154158
stride=config[KerasKeys.strides],
155159
padding=config[KerasKeys.padding].upper(),
156160
conv_mxts_mode=conv_mxts_mode)]
@@ -177,7 +181,9 @@ def dense_conversion(config,
177181
name=("preact_" if len(converted_activation) > 0
178182
else "")+name,
179183
kernel=config[KerasKeys.weights][0],
180-
bias=config[KerasKeys.weights][1],
184+
bias=(config[KerasKeys.weights][1] if
185+
len(config[KerasKeys.weights]) > 1
186+
else np.zeros(config[KerasKeys.weights][0].shape[-1])),
181187
verbose=verbose,
182188
dense_mxts_mode=dense_mxts_mode)]
183189
to_return.extend(converted_activation)

deeplift/models.py

Lines changed: 16 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ def get_target_contribs_of_input_with_filter_ref_func(
123123
**kwargs)
124124

125125
def _set_scoring_mode_for_target_layer(self, target_layer):
126+
print("TARGET LAYER SET TO "+str(target_layer.get_name()))
126127
if (deeplift.util.is_type(target_layer,
127128
layers.Activation)):
128129
raise RuntimeError("You set the target layer to an"
@@ -131,55 +132,22 @@ def _set_scoring_mode_for_target_layer(self, target_layer):
131132
+" to set the target layer to the layer *before*"
132133
+" the activation layer instead? (recommended for "
133134
+" classification)")
134-
if (len(target_layer.get_output_layers())==0):
135-
scoring_mode=ScoringMode.OneAndZeros
136-
else:
137-
assert len(target_layer.get_output_layers())==1,\
138-
"at most one output was expected for target layer "\
139-
+str(target_layer.get_name())+" but got: "+\
140-
str(target_layer.get_output_layers())
141-
final_activation_layer = target_layer.get_output_layers()[0]
142-
if (deeplift.util.is_type(final_activation_layer,
143-
layers.Activation)==False):
144-
raise RuntimeError("There is a layer after your target"
145-
+" layer but it is not an activation layer"
146-
+", which seems odd...if doing regression, make"
147-
+" sure to set the target layer to the last layer")
148-
deeplift.util.assert_is_type(final_activation_layer,
149-
layers.Activation,
150-
"final_activation_layer")
151-
final_activation_type = type(final_activation_layer).__name__
152-
153-
if (final_activation_type == "Sigmoid"):
154-
scoring_mode=ScoringMode.OneAndZeros
155-
elif (final_activation_type == "Softmax"):
156-
#new_W, new_b =\
157-
# deeplift.util.get_mean_normalised_softmax_weights(
158-
# target_layer.W, target_layer.b)
159-
#The weights need to be mean normalised before they are
160-
#passed in because build_fwd_pass_vars() has already
161-
#been called before this function is called,
162-
#because get_output_layers() (used in this function)
163-
#is updated during the build_fwd_pass_vars()
164-
#call - that is why I can't simply mean-normalise
165-
#the weights right here :-( (It is a pain and a
166-
#recipe for bugs to rebuild the forward pass
167-
#vars after they have already been built - in
168-
#particular for a model that branches because where
169-
#the branches unify you need really want them to be
170-
#using the same symbolic variables - no use having
171-
#needlessly complicated/redundant graphs and if a node
172-
#is common to two outputs, so should its symbolic vars
173-
#TODO: I should put in a 'reset_fwd_pass' function and use
174-
#it to invalidate the _built_fwd_pass_vars cache and recompile
175-
#if (np.allclose(target_layer.W, new_W)==False):
176-
# print("Consider mean-normalising softmax layer")
177-
#assert np.allclose(target_layer.b, new_b),\
178-
# "Please mean-normalise weights and biases of softmax layer"
135+
scoring_mode=ScoringMode.OneAndZeros
136+
if (len(target_layer.get_output_layers())>0):
137+
if (len(target_layer.get_output_layers())>1):
138+
print("WARNING: the target layer"
139+
+str(target_layer.get_name())
140+
+" has multiple output layers"
141+
+str(target_layer.get_output_layers()))
142+
else:
143+
final_activation_layer = target_layer.get_output_layers()[0]
144+
if (deeplift.util.is_type(final_activation_layer,
145+
layers.Activation)==False):
146+
print("\n\nWARNING!!! There is a layer after your target"
147+
+" layer but it is not an activation layer"
148+
+", which is unusual; double check you have set"
149+
+" the target layer correctly.\n\n")
179150
scoring_mode=ScoringMode.OneAndZeros
180-
else:
181-
raise RuntimeError("Unsupported final_activation_type: "
182-
+final_activation_type)
183151
target_layer.set_scoring_mode(scoring_mode)
184152

185153
def save_to_yaml_only(self, file_name):

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
Implements the methods in "Learning Important Features Through Propagating Activation Differences" by Shrikumar, Greenside & Kundaje, as well as other commonly-used methods such as gradients, guided backprop and integrated gradients. See https://github.com/kundajelab/deeplift for documentation and FAQ.
99
""",
1010
url='https://github.com/kundajelab/deeplift',
11-
version='0.6.9.2',
11+
version='0.6.9.3',
1212
packages=['deeplift',
1313
'deeplift.layers', 'deeplift.visualization',
1414
'deeplift.conversion'],

0 commit comments

Comments (0)