diff --git a/examples/cluster/plot_color_quantization.py b/examples/cluster/plot_color_quantization.py
index 0bda5c66ce4a33..7ef4ad63536549 100644
--- a/examples/cluster/plot_color_quantization.py
+++ b/examples/cluster/plot_color_quantization.py
@@ -84,21 +84,18 @@ def recreate_image(codebook, labels, w, h):
 # Display all results, alongside original image
 plt.figure(1)
 plt.clf()
-ax = plt.axes([0, 0, 1, 1])
 plt.axis('off')
 plt.title('Original image (96,615 colors)')
 plt.imshow(china)
 
 plt.figure(2)
 plt.clf()
-ax = plt.axes([0, 0, 1, 1])
 plt.axis('off')
 plt.title('Quantized image (64 colors, K-Means)')
 plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
 
 plt.figure(3)
 plt.clf()
-ax = plt.axes([0, 0, 1, 1])
 plt.axis('off')
 plt.title('Quantized image (64 colors, Random)')
 plt.imshow(recreate_image(codebook_random, labels_random, w, h))
diff --git a/examples/cluster/plot_dict_face_patches.py b/examples/cluster/plot_dict_face_patches.py
index ac2fde3e2cc6a7..6d33f01e6a7cbb 100644
--- a/examples/cluster/plot_dict_face_patches.py
+++ b/examples/cluster/plot_dict_face_patches.py
@@ -41,7 +41,6 @@
 patch_size = (20, 20)
 
 buffer = []
-index = 1
 t0 = time.time()
 
 # The online learning part: cycle over the whole dataset 6 times
diff --git a/examples/cluster/plot_kmeans_stability_low_dim_dense.py b/examples/cluster/plot_kmeans_stability_low_dim_dense.py
index b5d4326c5c7138..109d2097b6be9b 100644
--- a/examples/cluster/plot_kmeans_stability_low_dim_dense.py
+++ b/examples/cluster/plot_kmeans_stability_low_dim_dense.py
@@ -69,7 +69,7 @@ def make_data(random_state, n_samples_per_center, grid_size, scale):
 
 
 # Part 1: Quantitative evaluation of various init methods
-fig = plt.figure()
+plt.figure()
 plots = []
 legends = []
 
@@ -105,7 +105,7 @@ def make_data(random_state, n_samples_per_center, grid_size, scale):
 km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
                      random_state=random_state).fit(X)
 
-fig = plt.figure()
+plt.figure()
 for k in range(n_clusters):
     my_members = km.labels_ == k
     color = cm.spectral(float(k) / n_clusters, 1)
diff --git a/examples/decomposition/plot_pca_3d.py b/examples/decomposition/plot_pca_3d.py
index d9db17ffaec397..58494f7ef816d0 100644
--- a/examples/decomposition/plot_pca_3d.py
+++ b/examples/decomposition/plot_pca_3d.py
@@ -73,8 +73,6 @@ def plot_figs(fig_num, elev, azim):
     pca_score = pca.explained_variance_ratio_
     V = pca.components_
 
-    x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
-
     x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
     x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
     y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
diff --git a/examples/ensemble/plot_forest_iris.py b/examples/ensemble/plot_forest_iris.py
index f0fd5dc7d003e7..73db88d829b1f2 100644
--- a/examples/ensemble/plot_forest_iris.py
+++ b/examples/ensemble/plot_forest_iris.py
@@ -46,7 +46,6 @@
 import matplotlib.pyplot as plt
 from matplotlib.colors import ListedColormap
 
-from sklearn import clone
 from sklearn.datasets import load_iris
 from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
                               AdaBoostClassifier)
@@ -90,10 +89,9 @@
         X = (X - mean) / std
 
         # Train
-        clf = clone(model)
-        clf = model.fit(X, y)
+        model.fit(X, y)
 
-        scores = clf.score(X, y)
+        scores = model.score(X, y)
         # Create a title for each column and the console by using str() and
         # slicing away useless parts of the string
         model_title = str(type(model)).split(
diff --git a/examples/gaussian_process/plot_gpc_isoprobability.py b/examples/gaussian_process/plot_gpc_isoprobability.py
index 2a27434cf148f4..0639a65a384a4d 100644
--- a/examples/gaussian_process/plot_gpc_isoprobability.py
+++ b/examples/gaussian_process/plot_gpc_isoprobability.py
@@ -85,7 +85,7 @@ def g(x):
 
 plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
 
-cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
+plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
 
 cs = plt.contour(x1, x2, y_prob, [0.666], colors='b', linestyles='solid')
 
diff --git a/examples/gaussian_process/plot_gpr_noisy_targets.py b/examples/gaussian_process/plot_gpr_noisy_targets.py
index e90b5e57ad2575..8841f04a3987f9 100644
--- a/examples/gaussian_process/plot_gpr_noisy_targets.py
+++ b/examples/gaussian_process/plot_gpr_noisy_targets.py
@@ -61,7 +61,7 @@ def f(x):
 
 # Plot the function, the prediction and the 95% confidence interval based on
 # the MSE
-fig = plt.figure()
+plt.figure()
 plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
 plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
 plt.plot(x, y_pred, 'b-', label=u'Prediction')
@@ -97,7 +97,7 @@ def f(x):
 
 # Plot the function, the prediction and the 95% confidence interval based on
 # the MSE
-fig = plt.figure()
+plt.figure()
 plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
 plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
 plt.plot(x, y_pred, 'b-', label=u'Prediction')
diff --git a/examples/linear_model/plot_lasso_coordinate_descent_path.py b/examples/linear_model/plot_lasso_coordinate_descent_path.py
index 7b6d2a52cae871..3cd96d6692e8d7 100644
--- a/examples/linear_model/plot_lasso_coordinate_descent_path.py
+++ b/examples/linear_model/plot_lasso_coordinate_descent_path.py
@@ -47,8 +47,6 @@
 
 # Display results
 plt.figure(1)
-ax = plt.gca()
-
 colors = cycle(['b', 'r', 'g', 'c', 'k'])
 neg_log_alphas_lasso = -np.log10(alphas_lasso)
 neg_log_alphas_enet = -np.log10(alphas_enet)
@@ -64,7 +62,6 @@
 
 
 plt.figure(2)
-ax = plt.gca()
 neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
 for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
     l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
@@ -78,7 +75,6 @@
 
 
 plt.figure(3)
-ax = plt.gca()
 neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
 for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
     l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
diff --git a/examples/neighbors/plot_digits_kde_sampling.py b/examples/neighbors/plot_digits_kde_sampling.py
index ba59fb5ece537a..8367d16b955feb 100644
--- a/examples/neighbors/plot_digits_kde_sampling.py
+++ b/examples/neighbors/plot_digits_kde_sampling.py
@@ -20,7 +20,6 @@
 
 # load the data
 digits = load_digits()
-data = digits.data
 
 # project the 64-dimensional data to a lower dimension
 pca = PCA(n_components=15, whiten=False)
diff --git a/examples/tree/plot_tree_regression_multioutput.py b/examples/tree/plot_tree_regression_multioutput.py
index 005f73683921b9..b47bfcd80e49a3 100644
--- a/examples/tree/plot_tree_regression_multioutput.py
+++ b/examples/tree/plot_tree_regression_multioutput.py
@@ -42,7 +42,6 @@
 
 # Plot the results
 plt.figure()
-s = 50
 s = 25
 plt.scatter(y[:, 0], y[:, 1], c="navy", s=s,
             edgecolor="black", label="data")
diff --git a/sklearn/decomposition/dict_learning.py b/sklearn/decomposition/dict_learning.py
index 4164a459b31aec..e4b36d120773a8 100644
--- a/sklearn/decomposition/dict_learning.py
+++ b/sklearn/decomposition/dict_learning.py
@@ -824,7 +824,6 @@ def transform(self, X):
         check_is_fitted(self, 'components_')
 
         X = check_array(X)
-        n_samples, n_features = X.shape
 
         code = sparse_encode(
             X, self.components_, algorithm=self.transform_algorithm,
diff --git a/sklearn/decomposition/factor_analysis.py b/sklearn/decomposition/factor_analysis.py
index 975cd4cb765ac4..481a5e2322e3f6 100644
--- a/sklearn/decomposition/factor_analysis.py
+++ b/sklearn/decomposition/factor_analysis.py
@@ -326,7 +326,6 @@ def score_samples(self, X):
         Xr = X - self.mean_
         precision = self.get_precision()
         n_features = X.shape[1]
-        log_like = np.zeros(X.shape[0])
         log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
         log_like -= .5 * (n_features * log(2. * np.pi)
                           - fast_logdet(precision))
diff --git a/sklearn/decomposition/pca.py b/sklearn/decomposition/pca.py
index 16b8619ac9019b..cbd688f3d748d3 100644
--- a/sklearn/decomposition/pca.py
+++ b/sklearn/decomposition/pca.py
@@ -550,7 +550,6 @@ def score_samples(self, X):
         X = check_array(X)
         Xr = X - self.mean_
         n_features = X.shape[1]
-        log_like = np.zeros(X.shape[0])
         precision = self.get_precision()
         log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
         log_like -= .5 * (n_features * log(2. * np.pi) -
diff --git a/sklearn/gaussian_process/gaussian_process.py b/sklearn/gaussian_process/gaussian_process.py
index 53c519e5d5ac8b..5bc89d28df6b64 100644
--- a/sklearn/gaussian_process/gaussian_process.py
+++ b/sklearn/gaussian_process/gaussian_process.py
@@ -444,11 +444,6 @@ def predict(self, X, eval_MSE=False, batch_size=None):
             # Normalize input
             X = (X - self.X_mean) / self.X_std
 
-            # Initialize output
-            y = np.zeros(n_eval)
-            if eval_MSE:
-                MSE = np.zeros(n_eval)
-
             # Get pairwise componentwise L1-distances to the input training set
             dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
             # Get regression function and correlation
diff --git a/sklearn/linear_model/least_angle.py b/sklearn/linear_model/least_angle.py
index 17b988b08e6c70..bb7c12ab601a27 100644
--- a/sklearn/linear_model/least_angle.py
+++ b/sklearn/linear_model/least_angle.py
@@ -414,8 +414,6 @@ def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
                 alphas[-add_features:] = 0
             coef = coefs[n_iter]
             prev_coef = coefs[n_iter - 1]
-            alpha = alphas[n_iter, np.newaxis]
-            prev_alpha = alphas[n_iter - 1, np.newaxis]
         else:
             # mimic the effect of incrementing n_iter on the array references
             prev_coef = coef
diff --git a/sklearn/mixture/dpgmm.py b/sklearn/mixture/dpgmm.py
index c2fd42ab458425..ddc861b4c19f08 100644
--- a/sklearn/mixture/dpgmm.py
+++ b/sklearn/mixture/dpgmm.py
@@ -273,7 +273,6 @@ def score_samples(self, X):
         X = check_array(X)
         if X.ndim == 1:
             X = X[:, np.newaxis]
-        z = np.zeros((X.shape[0], self.n_components))
         sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
         dgamma1 = digamma(self.gamma_.T[1]) - sd
         dgamma2 = np.zeros(self.n_components)
@@ -844,7 +843,6 @@ def _bound_proportions(self, z):
         return logprior
 
     def _bound_concentration(self):
-        logprior = 0.
         logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
                                                           * self.alpha_)
         logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
diff --git a/sklearn/utils/extmath.py b/sklearn/utils/extmath.py
index 70619673bea3b7..e95ceb57497aed 100644
--- a/sklearn/utils/extmath.py
+++ b/sklearn/utils/extmath.py
@@ -421,7 +421,6 @@ def weighted_mode(a, w, axis=0):
     else:
         a = np.asarray(a)
         w = np.asarray(w)
-        axis = axis
 
     if a.shape != w.shape:
         w = np.zeros(a.shape, dtype=w.dtype) + w