4
4
import atexit
5
5
import json
6
6
import shutil
7
- import os
8
- from multiprocessing . pool import ThreadPool , Pool as ProcessPool
7
+ from multiprocessing . pool import ThreadPool
8
+ from multiprocessing import Pool as ProcessPool
9
9
from multiprocessing import cpu_count
10
10
import tempfile
11
+ import traceback
12
+ import sys
11
13
12
14
13
15
import numpy as np
@@ -50,20 +52,28 @@ def init_attributes(self, store, read_only=False):
50
52
51
53
52
54
def _append (arg ):
53
- z , i = arg
54
- import numpy as np
55
- x = np .empty (1000 , dtype = 'i4' )
56
- x [:] = i
57
- shape = z .append (x )
58
- return shape
55
+ try :
56
+ z , i = arg
57
+ import numpy as np
58
+ x = np .empty (1000 , dtype = 'i4' )
59
+ x [:] = i
60
+ shape = z .append (x )
61
+ return shape
62
+ except Exception as e :
63
+ traceback .print_exc (file = sys .stderr )
64
+ raise
59
65
60
66
61
67
def _set_arange (arg ):
62
- z , i = arg
63
- import numpy as np
64
- x = np .arange (i * 1000 , (i * 1000 )+ 1000 , 1 )
65
- z [i * 1000 :(i * 1000 )+ 1000 ] = x
66
- return i
68
+ try :
69
+ z , i = arg
70
+ import numpy as np
71
+ x = np .arange (i * 1000 , (i * 1000 )+ 1000 , 1 )
72
+ z [i * 1000 :(i * 1000 )+ 1000 ] = x
73
+ return i
74
+ except Exception as e :
75
+ traceback .print_exc (file = sys .stderr )
76
+ raise
67
77
68
78
69
79
class MixinArraySyncTests (object ):
@@ -77,15 +87,15 @@ def test_parallel_setitem(self):
77
87
pool = self .create_pool ()
78
88
79
89
# parallel setitem
80
- future = pool .map_async (_set_arange , zip ([arr ] * n , range (n )))
81
- results = sorted (future .get (60 ))
82
- pool .close ()
83
- pool .terminate ()
90
+ results = pool .map (_set_arange , zip ([arr ] * n , range (n )), chunksize = 1 )
91
+ results = sorted (results )
84
92
85
93
print (results )
86
94
eq (list (range (n )), results )
87
95
assert_array_equal (np .arange (n * 1000 ), arr [:])
88
96
97
+ pool .terminate ()
98
+
89
99
def test_parallel_append (self ):
90
100
n = 100
91
101
@@ -95,15 +105,15 @@ def test_parallel_append(self):
95
105
pool = self .create_pool ()
96
106
97
107
# parallel append
98
- future = pool .map_async (_append , zip ([arr ] * n , range (n )))
99
- results = sorted (future .get (60 ))
100
- pool .close ()
101
- pool .terminate ()
108
+ results = pool .map (_append , zip ([arr ] * n , range (n )), chunksize = 1 )
109
+ results = sorted (results )
102
110
103
111
print (results )
104
112
eq ([((i + 2 )* 1000 ,) for i in range (n )], results )
105
113
eq (((n + 1 )* 1000 ,), arr .shape )
106
114
115
+ pool .terminate ()
116
+
107
117
108
118
class TestArrayWithThreadSynchronizer (TestArray , MixinArraySyncTests ):
109
119
@@ -136,12 +146,13 @@ def create_pool(self):
136
146
class TestArrayWithProcessSynchronizer (TestArray , MixinArraySyncTests ):
137
147
138
148
def create_array(self, read_only=False, **kwargs):
    """Build a process-synchronized Array backed by fresh temp directories.

    Both the data store and the synchronizer's lock directory are created
    with ``tempfile.mkdtemp()`` and scheduled for removal at interpreter
    exit, so concurrent test runs never collide on a shared path.
    """
    def fresh_dir():
        # temp directory, removed automatically when the process exits
        d = tempfile.mkdtemp()
        atexit.register(atexit_rmtree, d)
        return d

    store = DirectoryStore(fresh_dir())
    init_array(store, **kwargs)
    synchronizer = ProcessSynchronizer(fresh_dir())
    return Array(store, synchronizer=synchronizer,
                 read_only=read_only, cache_metadata=False)
147
158
@@ -161,7 +172,7 @@ def test_repr(self):
161
172
eq (l1 , l2 )
162
173
163
174
def create_pool(self):
    """Return a process pool sized to the machine's CPU count."""
    return ProcessPool(processes=cpu_count())
166
177
167
178
@@ -187,15 +198,20 @@ def test_parallel_create_group(self):
187
198
188
199
# parallel create group
189
200
n = 100
190
- future = pool .map_async (
191
- _create_group , zip ([g ] * n , [str (i ) for i in range (n )]))
192
- results = sorted (future .get (60 ))
201
+ results = pool .map (
202
+ _create_group ,
203
+ zip ([g ] * n , [str (i ) for i in range (n )]),
204
+ chunksize = 1
205
+ )
206
+ results = sorted (results )
193
207
pool .close ()
194
208
pool .terminate ()
195
209
196
210
print (results )
197
211
eq (n , len (g ))
198
212
213
+ pool .terminate ()
214
+
199
215
def test_parallel_require_group (self ):
200
216
201
217
# setup
@@ -204,15 +220,20 @@ def test_parallel_require_group(self):
204
220
205
221
# parallel require group
206
222
n = 100
207
- future = pool .map_async (
208
- _require_group , zip ([g ] * n , [str (i // 10 ) for i in range (n )]))
209
- results = sorted (future .get (60 ))
223
+ results = pool .map (
224
+ _require_group ,
225
+ zip ([g ] * n , [str (i // 10 ) for i in range (n )]),
226
+ chunksize = 1
227
+ )
228
+ results = sorted (results )
210
229
pool .close ()
211
230
pool .terminate ()
212
231
213
232
print (results )
214
233
eq (n // 10 , len (g ))
215
234
235
+ pool .terminate ()
236
+
216
237
217
238
class TestGroupWithThreadSynchronizer (TestGroup , MixinGroupSyncTests ):
218
239
@@ -247,9 +268,8 @@ def test_synchronizer_property(self):
247
268
class TestGroupWithProcessSynchronizer (TestGroup , MixinGroupSyncTests ):
248
269
249
270
def create_store(self):
    """Create a DirectoryStore rooted in a self-cleaning temp directory.

    Returns
    -------
    tuple
        ``(store, chunk_store)`` where ``chunk_store`` is ``None``.
    """
    tmp = tempfile.mkdtemp()
    # schedule removal at interpreter exit so test runs leave no residue
    atexit.register(atexit_rmtree, tmp)
    return DirectoryStore(tmp), None
255
275
@@ -258,13 +278,15 @@ def create_group(self, store=None, path=None, read_only=False,
258
278
if store is None :
259
279
store , chunk_store = self .create_store ()
260
280
init_group (store , path = path , chunk_store = chunk_store )
261
- synchronizer = ProcessSynchronizer ('test_sync_locks' )
281
+ sync_path = tempfile .mkdtemp ()
282
+ atexit .register (atexit_rmtree , sync_path )
283
+ synchronizer = ProcessSynchronizer (sync_path )
262
284
g = Group (store , path = path , read_only = read_only ,
263
285
synchronizer = synchronizer , chunk_store = chunk_store )
264
286
return g
265
287
266
288
def create_pool(self):
    """Return a process pool with one worker per available CPU."""
    return ProcessPool(processes=cpu_count())
269
291
270
292
def test_group_repr (self ):
0 commit comments