
Commit 259874b

Merge pull request #9 from QVPR/conda_pypi_fixes
Fixes for pypi/conda release
2 parents: 051127d + 2177ecb

File tree: 8 files changed, +69 −17 lines

New GitHub Actions workflow (29 additions, 0 deletions)

@@ -0,0 +1,29 @@
+# This workflows will upload a Python Package using Twine when a release is created
+# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+name: Upload Python Package
+
+on:
+  release:
+    types: [created]
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.x'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install setuptools wheel twine
+    - name: Build and publish
+      env:
+        TWINE_USERNAME: __token__
+        TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+      run: |
+        python setup.py sdist bdist_wheel
+        twine upload dist/*
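Note: the workflow builds an sdist and a wheel and uploads them with twine whenever a GitHub release is created. The build itself can be sanity-checked locally before tagging a release; below is a minimal sketch that runs the same commands via the standard library (the twine check step is an extra validation not present in the workflow).

import glob
import subprocess
import sys

# Install the same build/upload tools the workflow installs.
subprocess.run([sys.executable, '-m', 'pip', 'install', '--upgrade',
                'setuptools', 'wheel', 'twine'], check=True)

# Build the sdist and wheel exactly as the "Build and publish" step does.
subprocess.run([sys.executable, 'setup.py', 'sdist', 'bdist_wheel'], check=True)

# Local-only sanity check on the built metadata; the real upload is left to
# the workflow, which authenticates with the PYPI_TOKEN secret.
subprocess.run([sys.executable, '-m', 'twine', 'check'] + glob.glob('dist/*'), check=True)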

.gitignore (1 addition, 0 deletions)

@@ -4,6 +4,7 @@
 patchnetvlad/pretrained_models/*
 patchnetvlad/results/*
 patchnetvlad/output_features/*
+results/*

 ## Python stuff
 # Byte-compiled / optimized / DLL files

feature_extract.py (1 addition, 1 deletion)

@@ -164,7 +164,7 @@ def main():

     torch.cuda.empty_cache()  # garbage clean GPU memory, a bug can occur when Pytorch doesn't automatically clear the
     # memory after runs
-    print('Done. Finished extracting and saving features')
+    print('\n\nDone. Finished extracting and saving features')


 if __name__ == "__main__":

feature_match.py (13 additions, 5 deletions)

@@ -73,7 +73,9 @@ def compute_recall(gt, predictions, numQ, n_values, recall_str=''):
 def write_kapture_output(opt, eval_set, predictions, outfile_name):
     if not exists(opt.result_save_folder):
         os.mkdir(opt.result_save_folder)
-    with open(join(opt.result_save_folder, outfile_name), 'w') as kap_out:
+    outfile = join(opt.result_save_folder, outfile_name)
+    print('Writing results to', outfile)
+    with open(outfile, 'w') as kap_out:
         kap_out.write('# kapture format: 1.0\n')
         kap_out.write('# query_image, map_image\n')
         image_list_array = np.array(eval_set.images)

@@ -87,7 +89,9 @@ def write_kapture_output(opt, eval_set, predictions, outfile_name):
 def write_recalls_output(opt, recalls_netvlad, recalls_patchnetvlad, n_values):
     if not exists(opt.result_save_folder):
         os.mkdir(opt.result_save_folder)
-    with open(join(opt.result_save_folder, 'recalls.txt'), 'w') as rec_out:
+    outfile = join(opt.result_save_folder, 'recalls.txt')
+    print('Writing recalls to', outfile)
+    with open(outfile, 'w') as rec_out:
         for n in n_values:
             rec_out.write("Recall {}@{}: {:.4f}\n".format('NetVLAD', n, recalls_netvlad[n]))
         for n in n_values:

@@ -120,7 +124,8 @@ def feature_match(eval_set, device, opt, config):
     if config['feature_match']['pred_input_path'] != 'None':
         predictions = np.load(config['feature_match']['pred_input_path'])  # optionally load predictions from a np file
     else:
-        if opt.ground_truth_path.split('/')[1][:-4] == 'tokyo247':
+        if opt.ground_truth_path and 'tokyo247' in opt.ground_truth_path:
+            print('Tokyo24/7: Selecting only one of the 12 cutouts per panorama')
             # followed nnSearchPostprocess in https://github.com/Relja/netvlad/blob/master/datasets/dbTokyo247.m
             # noinspection PyArgumentList
             _, predictions = faiss_index.search(qFeat, max(n_values) * 12)  # 12 cutouts per panorama

@@ -133,7 +138,7 @@ def feature_match(eval_set, device, opt, config):
             predictions = np.array(predictions_new)
         else:
             # noinspection PyArgumentList
-            _, predictions = faiss_index.search(qFeat, max(n_values))
+            _, predictions = faiss_index.search(qFeat, min(len(qFeat), max(n_values)))

     reranked_predictions = local_matcher(predictions, eval_set, input_query_local_features_prefix,
                                          input_index_local_features_prefix, config, device)

@@ -142,16 +147,19 @@ def feature_match(eval_set, device, opt, config):
     write_kapture_output(opt, eval_set, predictions, 'NetVLAD_predictions.txt')
     write_kapture_output(opt, eval_set, reranked_predictions, 'PatchNetVLAD_predictions.txt')

-    print('Finished matching features. About to eval GT if GT was provided')
+    print('Finished matching features.')

     # for each query get those within threshold distance
     if opt.ground_truth_path is not None:
+        print('Calculating recalls using ground truth.')
         gt = eval_set.get_positives()

         global_recalls = compute_recall(gt, predictions, eval_set.numQ, n_values, 'NetVLAD')
         local_recalls = compute_recall(gt, reranked_predictions, eval_set.numQ, n_values, 'PatchNetVLAD')

         write_recalls_output(opt, global_recalls, local_recalls, n_values)
+    else:
+        print('No ground truth was provided; not calculating recalls.')


 def main():
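Note: the faiss_index.search change caps the number of requested neighbours so the query cannot ask for more results than it can use. A minimal, self-contained sketch of that pattern follows; it uses toy random descriptors rather than the repository's NetVLAD features, and the names (db_feat, q_feat, dim) are illustrative only.

import numpy as np
import faiss  # faiss-cpu or faiss-gpu

n_values = [1, 5, 10, 20, 100]  # recall@N cut-offs
dim = 64                        # toy descriptor dimensionality (hypothetical)

# Toy database and query descriptors standing in for the real global features.
db_feat = np.random.rand(50, dim).astype('float32')
q_feat = np.random.rand(8, dim).astype('float32')

index = faiss.IndexFlatL2(dim)
index.add(db_feat)

# Cap k as the commit does: min(len(qFeat), max(n_values)).
k = min(len(q_feat), max(n_values))
_, predictions = index.search(q_feat, k)  # (num_queries, k) array of database indices
print(predictions.shape)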

match_two.py (1 addition, 1 deletion)

@@ -158,7 +158,7 @@ def match_two(model, device, opt, config):
     scores, inlier_keypoints_one, inlier_keypoints_two = matcher.match(local_feats_one, local_feats_two)
     score = -normalise_func(scores, len(patch_sizes), patch_weights)

-    print("Similarity score between the two images is: '{:.5f}'. In this example, a larger score indicates a better match.".format(score))
+    print(f"Similarity score between the two images is: {score:.5f}. Larger scores indicate better matches.")

     if config['feature_match']['matcher'] == 'RANSAC':
         tqdm.write('====> Plotting Local Features')

patchnetvlad/models/local_matcher.py (4 additions, 4 deletions)

@@ -149,17 +149,17 @@ def local_matcher(predictions, eval_set, input_query_local_features_prefix,

     for q_idx, pred in enumerate(tqdm(predictions, leave=False, desc='Patch compare pred')):
         diffs = np.zeros((predictions.shape[1], len(patch_sizes)))
-        image_name = os.path.splitext(os.path.basename(eval_set.images[eval_set.numDb + q_idx]))[0]
+        image_name_query = os.path.splitext(os.path.basename(eval_set.images[eval_set.numDb + q_idx]))[0]
         qfeat = []
         for patch_size in patch_sizes:
-            qfilename = input_query_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name + '.npy'
+            qfilename = input_query_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name_query + '.npy'
             qfeat.append(torch.transpose(torch.tensor(np.load(qfilename), device=device), 0, 1))
             # we pre-transpose here to save compute speed
         for k, candidate in enumerate(pred):
-            image_name = os.path.splitext(os.path.basename(eval_set.images[candidate]))[0]
+            image_name_index = os.path.splitext(os.path.basename(eval_set.images[candidate]))[0]
             dbfeat = []
             for patch_size in patch_sizes:
-                dbfilename = input_index_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name + '.npy'
+                dbfilename = input_index_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name_index + '.npy'
                 dbfeat.append(torch.tensor(np.load(dbfilename), device=device))

             diffs[k, :], _, _ = matcher.match(qfeat, dbfeat)
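Note: the rename removes variable shadowing: image_name was reused for both the query image and each retrieved index image, which made the two roles easy to confuse. The filename convention itself is unchanged; the sketch below wraps it in a hypothetical helper (local_feature_path is not part of the repository) purely to illustrate how the .npy paths are built.

import os

def local_feature_path(prefix, patch_size, image_path):
    # e.g. prefix='/tmp/patchfeats/query', patch_size=5, image_path='images/q001.jpg'
    #   -> '/tmp/patchfeats/query_psize5_q001.npy'
    image_name = os.path.splitext(os.path.basename(image_path))[0]
    return prefix + '_' + 'psize{}_'.format(patch_size) + image_name + '.npy'

print(local_feature_path('/tmp/patchfeats/query', 5, 'images/q001.jpg'))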

patchnetvlad/tools/datasets.py (6 additions, 0 deletions)

@@ -33,6 +33,8 @@
 from PIL import Image
 from sklearn.neighbors import NearestNeighbors

+from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR
+

 class PlaceDataset(data.Dataset):
     def __init__(self, query_file_path, index_file_path, dataset_root_dir, ground_truth_path, config):

@@ -52,6 +54,10 @@ def __init__(self, query_file_path, index_file_path, dataset_root_dir, ground_truth_path, config):
             self.images = self.database

         self.images = [os.path.join(dataset_root_dir, image) for image in self.images]
+        # check if images are relative to root dir
+        if not os.path.isfile(self.images[0]):
+            if os.path.isfile(os.path.join(PATCHNETVLAD_ROOT_DIR, self.images[0])):
+                self.images = [os.path.join(PATCHNETVLAD_ROOT_DIR, image) for image in self.images]

         self.positives = None
         self.distances = None
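Note: the PlaceDataset change adds a fallback: if the first image path does not exist relative to dataset_root_dir, the paths are retried relative to PATCHNETVLAD_ROOT_DIR. A standalone sketch of the same pattern is below; resolve_paths is a hypothetical helper, the actual logic lives inline in PlaceDataset.__init__.

import os

def resolve_paths(images, dataset_root_dir, package_root_dir):
    # Join every image onto dataset_root_dir first, as the dataset loader does.
    candidates = [os.path.join(dataset_root_dir, image) for image in images]
    # If that does not point at a real file, retry relative to the package root
    # (this only helps when dataset_root_dir itself is a relative path).
    if not os.path.isfile(candidates[0]):
        if os.path.isfile(os.path.join(package_root_dir, candidates[0])):
            candidates = [os.path.join(package_root_dir, image) for image in candidates]
    return candidates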

setup.py (14 additions, 6 deletions)

@@ -1,4 +1,4 @@
-import os
+import os, sys
 from setuptools import setup, find_packages


@@ -9,8 +9,19 @@
     long_description = f.read()


+install_require_list = [
+    'numpy', 'torch', 'torchvision',
+    'tqdm', 'scipy', 'Pillow', 'scikit-learn',
+    'faiss', 'natsort']
+
+# workaround as opencv-python does not show up in "pip list" within a conda environment
+# we do not care as conda recipe has py-opencv requirement anyhow
+is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
+if not is_conda:
+    install_require_list.append('opencv-python')
+
 setup(name='patchnetvlad',
-      version='0.1.0',
+      version='0.1.1',
       description='Patch-NetVLAD: An open-source Python implementation of the CVPR2021 paper',
       long_description = long_description,
       long_description_content_type='text/markdown',

@@ -37,10 +48,7 @@
           'Programming Language :: Python :: 3.9',
       ],
       python_requires='>=3.6',
-      install_requires=[
-          'numpy', 'torch', 'torchvision',
-          'tqdm', 'scipy', 'Pillow', 'scikit-learn',
-          'faiss', 'natsort', 'opencv-python'],
+      install_requires=install_require_list,
      packages=find_packages(),
      keywords=[
          'python', 'place recognition', 'image retrieval', 'computer vision', 'robotics'
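Note: the conda check relies on the conda-meta directory that conda creates inside every environment prefix, so opencv-python is only added to install_requires for pip-managed environments (the conda recipe depends on py-opencv instead). A minimal sketch of the same detection outside setup.py, using only the standard library and an illustrative subset of the dependency list:

import os
import sys

# conda writes package metadata into <env prefix>/conda-meta, so its presence
# is a cheap signal that we are running inside a conda environment.
is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))

deps = ['numpy', 'natsort']  # illustrative subset of the real requirement list
if not is_conda:
    # pip-managed environment: pull in opencv-python ourselves
    deps.append('opencv-python')

print('conda environment:', is_conda)
print('install_requires would be:', deps)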
