[MRG+1] Fix warnings in lgtm.com (remove redundant code) (scikit-lear…
qinhanmin2014 authored and Joan Massich committed Sep 15, 2017
1 parent c62f6f0 commit 2192eb7
Showing 17 changed files with 7 additions and 34 deletions.
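Every hunk below removes the same kind of code: a value assigned to a name that is never read before being overwritten or discarded, which is what lgtm.com reports as redundant. A minimal before/after sketch of that pattern, using matplotlib purely for illustration (this snippet is not taken from any of the files below):

import matplotlib.pyplot as plt

# Before: the returned Figure is bound to a name that is never used again,
# so static analysis flags the assignment as redundant.
fig = plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])

# After: call plt.figure() for its side effect only and drop the binding.
plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])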
3 changes: 0 additions & 3 deletions examples/cluster/plot_color_quantization.py
@@ -84,21 +84,18 @@ def recreate_image(codebook, labels, w, h):
# Display all results, alongside original image
plt.figure(1)
plt.clf()
-ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)

plt.figure(2)
plt.clf()
-ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))

plt.figure(3)
plt.clf()
-ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
1 change: 0 additions & 1 deletion examples/cluster/plot_dict_face_patches.py
@@ -41,7 +41,6 @@
patch_size = (20, 20)

buffer = []
-index = 1
t0 = time.time()

# The online learning part: cycle over the whole dataset 6 times
4 changes: 2 additions & 2 deletions examples/cluster/plot_kmeans_stability_low_dim_dense.py
@@ -69,7 +69,7 @@ def make_data(random_state, n_samples_per_center, grid_size, scale):

# Part 1: Quantitative evaluation of various init methods

-fig = plt.figure()
+plt.figure()
plots = []
legends = []

@@ -105,7 +105,7 @@ def make_data(random_state, n_samples_per_center, grid_size, scale):
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
                     random_state=random_state).fit(X)

-fig = plt.figure()
+plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
2 changes: 0 additions & 2 deletions examples/decomposition/plot_pca_3d.py
@@ -73,8 +73,6 @@ def plot_figs(fig_num, elev, azim):
pca_score = pca.explained_variance_ratio_
V = pca.components_

-x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
-
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
6 changes: 2 additions & 4 deletions examples/ensemble/plot_forest_iris.py
@@ -46,7 +46,6 @@
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

-from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
                              AdaBoostClassifier)
@@ -90,10 +89,9 @@
X = (X - mean) / std

# Train
-clf = clone(model)
-clf = model.fit(X, y)
+model.fit(X, y)

-scores = clf.score(X, y)
+scores = model.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(
2 changes: 1 addition & 1 deletion examples/gaussian_process/plot_gpc_isoprobability.py
@@ -85,7 +85,7 @@ def g(x):

plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)

-cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
+plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')

cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
                 linestyles='solid')
4 changes: 2 additions & 2 deletions examples/gaussian_process/plot_gpr_noisy_targets.py
@@ -61,7 +61,7 @@ def f(x):

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
-fig = plt.figure()
+plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
@@ -97,7 +97,7 @@ def f(x):

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
-fig = plt.figure()
+plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
4 changes: 0 additions & 4 deletions examples/linear_model/plot_lasso_coordinate_descent_path.py
@@ -47,8 +47,6 @@
# Display results

plt.figure(1)
-ax = plt.gca()
-
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
@@ -64,7 +62,6 @@


plt.figure(2)
-ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
@@ -78,7 +75,6 @@


plt.figure(3)
-ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
1 change: 0 additions & 1 deletion examples/neighbors/plot_digits_kde_sampling.py
@@ -20,7 +20,6 @@

# load the data
digits = load_digits()
-data = digits.data

# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
1 change: 0 additions & 1 deletion examples/tree/plot_tree_regression_multioutput.py
@@ -42,7 +42,6 @@

# Plot the results
plt.figure()
-s = 50
s = 25
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s,
            edgecolor="black", label="data")
1 change: 0 additions & 1 deletion sklearn/decomposition/dict_learning.py
@@ -824,7 +824,6 @@ def transform(self, X):
check_is_fitted(self, 'components_')

X = check_array(X)
-n_samples, n_features = X.shape

code = sparse_encode(
    X, self.components_, algorithm=self.transform_algorithm,
1 change: 0 additions & 1 deletion sklearn/decomposition/factor_analysis.py
@@ -326,7 +326,6 @@ def score_samples(self, X):
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
-log_like = np.zeros(X.shape[0])
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
                  - fast_logdet(precision))
1 change: 0 additions & 1 deletion sklearn/decomposition/pca.py
@@ -550,7 +550,6 @@ def score_samples(self, X):
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
-log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
5 changes: 0 additions & 5 deletions sklearn/gaussian_process/gaussian_process.py
@@ -444,11 +444,6 @@ def predict(self, X, eval_MSE=False, batch_size=None):
# Normalize input
X = (X - self.X_mean) / self.X_std

-# Initialize output
-y = np.zeros(n_eval)
-if eval_MSE:
-    MSE = np.zeros(n_eval)
-
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
2 changes: 0 additions & 2 deletions sklearn/linear_model/least_angle.py
@@ -414,8 +414,6 @@ def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alphas[-add_features:] = 0
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
-alpha = alphas[n_iter, np.newaxis]
-prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
2 changes: 0 additions & 2 deletions sklearn/mixture/dpgmm.py
@@ -273,7 +273,6 @@ def score_samples(self, X):
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
-z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
@@ -844,7 +843,6 @@ def _bound_proportions(self, z):
return logprior

def _bound_concentration(self):
-logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
                                                  * self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
1 change: 0 additions & 1 deletion sklearn/utils/extmath.py
@@ -421,7 +421,6 @@ def weighted_mode(a, w, axis=0):
else:
a = np.asarray(a)
w = np.asarray(w)
-axis = axis

if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
