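# Parallel_ImageHash.py
#
# Hashes every image under Input_Images/ with imagehash.average_hash, using
# one of three parallelization techniques (1: processes only, 2: threads
# only, 3: processes + threads), and writes one {hash: [image paths]} JSON
# file per worker into output/. Usage: python Parallel_ImageHash.py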
import json
import os
import shutil
import threading
import time
from multiprocessing import Pool
from multiprocessing import cpu_count
import imagehash
import numpy as np
from PIL import Image
from imutils import paths
# Yields successive subsets (chunks) of input_list, each containing at most
# length_of_chunk elements.
def chunk(input_list, length_of_chunk):
    for iterator in range(0, len(input_list), length_of_chunk):
        yield input_list[iterator: iterator + length_of_chunk]
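# Example: list(chunk([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]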
# Return the hash as a string.
def convert_hash(h):
    return str(np.array(h))
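# With imagehash this should be equivalent to str(h), i.e. the hash rendered
# as a hex digest, giving a stable string usable as a JSON/dict key.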
# Called once per process (technique 1) or thread (technique 2): hashes the
# images it is given and serializes the resulting {hash: [paths]} dict to JSON.
def process_images(payload):
    print("Starting Process {}".format(payload["id"]))
    hashes = {}
    for imagePath in payload["input_paths"]:
        image = Image.open(imagePath)
        image_hash = imagehash.average_hash(image)
        image_hash = convert_hash(image_hash)
        # Group paths by hash: images with the same hash look the same.
        hash_list = hashes.get(image_hash, [])
        hash_list.append(imagePath)
        hashes[image_hash] = hash_list
    print("Process {} Serializing".format(payload["id"]))
    with open(payload["output_path"], "w") as f:
        json.dump(hashes, f)
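# Each worker's JSON file is shaped like
#   {"<hash>": ["Input_Images/a.jpg", "Input_Images/b.jpg"], ...}
# (illustrative paths); multiple paths under one hash flag visually
# (near-)identical images.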
# Used for technique 3 (processes + threads). Each pool process splits its
# share of the images across num_threads_per_proc threads, each of which
# hashes its chunk via process_images_per_thread.
def process_images_processes_threads(payload):
    print("Starting Process {}".format(payload["id"]))
    # CHANGE NUMBER OF THREADS PER PROCESS HERE
    num_threads_per_proc = 4
    num_images = len(payload["input_paths"]) / num_threads_per_proc
    num_images = int(np.ceil(num_images))
    chunked_paths = list(chunk(payload["input_paths"], num_images))
    thread_payloads = []
    for (thread_id, image_paths) in enumerate(chunked_paths):
        # Name the file after both the process and the thread so threads in
        # different processes can never overwrite each other's output.
        output_path = os.path.sep.join(
            ["output", "proc_{}_thread_{}.json".format(payload["id"], thread_id)])
        thread_payloads.append({
            "id": thread_id,
            "input_paths": image_paths,
            "output_path": output_path
        })
    threads = []
    for thread_payload in thread_payloads:
        threads.append(threading.Thread(target=process_images_per_thread,
                                        args=(thread_payload,)))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
# Hashes one thread's chunk of images; called once per thread in every
# process for technique 3. The parent process already set output_path.
def process_images_per_thread(payload):
    print("Starting Thread {}".format(payload["id"]))
    hashes = {}
    for imagePath in payload["input_paths"]:
        image = Image.open(imagePath)
        image_hash = imagehash.average_hash(image)
        image_hash = convert_hash(image_hash)
        hash_list = hashes.get(image_hash, [])
        hash_list.append(imagePath)
        hashes[image_hash] = hash_list
    print("Thread {} Serializing".format(payload["id"]))
    with open(payload["output_path"], "w") as f:
        json.dump(hashes, f)
# ================================================
# TECHNIQUE 1: MULTIPROCESSING ONLY
def processes_technique(payloads):
    print("Launching Pool using {} Processes".format(procs))
    pool = Pool(processes=procs)
    pool.map(process_images, payloads)
    print("Waiting for processes to finish...")
    pool.close()
    pool.join()
# ================================================
# TECHNIQUE 2: THREADING ONLY
def threads_technique(payloads):
    print("Launching {} Threads".format(len(payloads)))
    threads = []
    for payload in payloads:
        threads.append(threading.Thread(target=process_images, args=(payload,)))
    for thread in threads:
        thread.start()
    for thread in threads:
        print("Waiting for Thread {} to finish".format(thread.name))
        thread.join()
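# NOTE: on CPython the GIL serializes pure-Python bytecode, so the
# threading-only technique mostly helps while Pillow releases the GIL
# (e.g. during image decoding); for CPU-bound hashing, techniques 1 and 3
# typically scale better across cores.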
# ================================================
# TECHNIQUE 3: MULTIPROCESSING AND THREADING COMBINED
def threads_and_processes_technique(payloads):
    print("Launching Pool using {} Processes".format(procs))
    pool = Pool(processes=procs)
    pool.map(process_images_processes_threads, payloads)
    print("Waiting for processes to finish...")
    pool.close()
    pool.join()
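# Total concurrency for technique 3 is procs * num_threads_per_proc
# (e.g. 8 processes x 4 threads = 32 hashing workers).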
# Resets the output folder so results from different techniques don't mix.
def deletefiles():
    # Create the folder on the first run, then empty it.
    os.makedirs("output", exist_ok=True)
    for file in os.listdir("output"):
        file_path = os.path.join("output", file)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as error:
            print("Failed to delete {}. Reason: {}".format(file_path, error))
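# The techniques above only write per-worker JSON shards; they are never
# combined. A minimal sketch (not part of the original pipeline, and not
# called below) of how the shards could be merged to surface duplicates:
def merge_hashes(output_dir="output"):
    merged = {}
    for json_file in os.listdir(output_dir):
        if not json_file.endswith(".json"):
            continue
        with open(os.path.join(output_dir, json_file)) as f:
            for image_hash, image_paths in json.load(f).items():
                merged.setdefault(image_hash, []).extend(image_paths)
    # A hash mapped to more than one path marks likely duplicate images.
    return {h: p for (h, p) in merged.items() if len(p) > 1}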
if __name__ == "__main__":
    deletefiles()
    # CHANGE NUMBER OF CPUS/THREADS FOR TECHNIQUES 1 AND 2 HERE
    procs = cpu_count()
    print("Collecting Image Paths...")
    allImagePaths = sorted(list(paths.list_images("Input_Images")))
    numImagesPerProc = len(allImagePaths) / float(procs)
    numImagesPerProc = int(np.ceil(numImagesPerProc))
    chunkedPaths = list(chunk(allImagePaths, numImagesPerProc))
    # Build the payloads: one dict per worker, each with an id, an output
    # path, and the list of input image paths that worker should hash.
    payloads = []
    for (i, imagePaths) in enumerate(chunkedPaths):
        outputPath = os.path.sep.join(["output", "proc_{}.json".format(i)])
        data = {
            "id": i,
            "input_paths": imagePaths,
            "output_path": outputPath
        }
        payloads.append(data)
    start_time = time.time()
    # COMMENT OUT THE UNUSED TECHNIQUES
    processes_technique(payloads)
    # threads_technique(payloads)
    # threads_and_processes_technique(payloads)
    print("Multiprocessing Complete")
    print("Time Taken:")
    print(time.time() - start_time)