This repository was archived by the owner on Apr 14, 2024. It is now read-only.

Commit 339024b

Better Python 2 / 3 compatibility
This fixes the breaking changes for Python 2 that were made in the last attempt and improves Python 3 compatibility a bit more.

Python 3 supports the `file` argument to the `print` function, which lets you specify where to print to. In the code, this was being used to print to stderr. Python 2 does not support this, so we now use `sys.stderr.write` instead.

Python 2's `Queue` module was renamed to `queue` in Python 3. We should import it under both names to ensure that the code runs on both versions. By default we assume Python 3 and try its name first, falling back to the old name on `ImportError`.

`threading._sleep` was a private function and was removed in Python 3. It has been replaced by `time.sleep`, which is both public and consistent across versions.

When determining chunk sizes and the number of chunks, floating-point values could be passed in or calculated. To ensure that the generated range does not raise an exception, these are now explicitly cast to integers.

The iterator was previously changed so that `next` became `__next__`, the Python 3 name for the method behind the `next` function. This adds Python 2 compatibility alongside that change.
1 parent 9a25914 commit 339024b

9 files changed: +82 -52 lines
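
Two patterns recur throughout this commit: writing to stderr with `sys.stderr.write`, and importing the renamed `queue` module with a fallback. The following is a minimal standalone sketch of both patterns, assuming nothing beyond the standard library; the helper name `log` is illustrative and not from this repository.

import sys

# Prefer the Python 3 module name and fall back to the Python 2 one.
try:
    from queue import Queue, Empty, Full      # Python 3
except ImportError:
    from Queue import Queue, Empty, Full      # Python 2

def log(msg):
    # sys.stderr.write behaves the same on Python 2 and 3, while
    # print(msg, file=sys.stderr) is a syntax error on Python 2
    # unless print_function is imported from __future__.
    sys.stderr.write(msg + "\n")

q = Queue(maxsize=1)
q.put("item")
try:
    q.put_nowait("one too many")
except Full:
    log("queue is full, as expected")
log("got: %s" % q.get_nowait())
try:
    q.get_nowait()
except Empty:
    log("queue is drained")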

‎async/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -30,7 +30,7 @@ def thread_interrupt_handler(signum, frame):
         signal.signal(signal.SIGINT, thread_interrupt_handler)
     except ValueError:
         # happens if we don't try it from the main thread
-        print("Failed to setup thread-interrupt handler. This is usually not critical", file=sys.stderr)
+        sys.stderr.write("Failed to setup thread-interrupt handler. This is usually not critical")
     # END exception handling
 
 

‎async/channel.py

Lines changed: 14 additions & 4 deletions

@@ -3,9 +3,15 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Contains a queue based channel implementation"""
-from queue import (
-    Empty,
-    Full
+try:
+    from queue import (
+        Empty,
+        Full
+    )
+except ImportError:
+    from Queue import (
+        Empty,
+        Full
     )
 
 from .util import (
@@ -161,6 +167,10 @@ def __next__(self):
             return items[0]
         raise StopIteration
 
+    def next(self):
+        """Support the Python 2 iterator syntax"""
+        return self.__next__()
+
     #} END iterator protocol
 
 
@@ -332,7 +342,7 @@ class IteratorReader(Reader):
 
     def __init__(self, iterator):
        self._empty = False
-        if not hasattr(iterator, 'next'):
+        if not hasattr(iterator, 'next') and not (hasattr(iterator, "__next__")):
            raise ValueError("Iterator %r needs a next() function" % iterator)
        self._iter = iterator
        self._lock = self.lock_type()
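
The `next` / `__next__` pair added above is the standard way to make one iterator class serve both interpreters. A minimal standalone example of the same pattern follows; the `CountDown` class is illustrative only, not part of async.

class CountDown(object):
    """Counts down from `start` to 1; iterable on Python 2 and Python 3."""

    def __init__(self, start):
        self._current = start

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 calls __next__()
        if self._current <= 0:
            raise StopIteration
        value = self._current
        self._current -= 1
        return value

    def next(self):
        """Support the Python 2 iterator syntax"""
        return self.__next__()

# hasattr-based check mirroring the IteratorReader change above
it = CountDown(3)
assert hasattr(it, 'next') or hasattr(it, '__next__')
assert list(CountDown(3)) == [3, 2, 1]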

‎async/pool.py

Lines changed: 13 additions & 3 deletions

@@ -14,9 +14,15 @@
     DummyLock
     )
 
-from queue import (
-    Queue,
-    Empty
+try:
+    from queue import (
+        Queue,
+        Empty
+    )
+except ImportError:
+    from Queue import (
+        Queue,
+        Empty
     )
 
 from .graph import Graph
@@ -277,6 +283,10 @@ def _prepare_channel_read(self, task, count):
             remainder = actual_count - (numchunks * chunksize)
         # END handle chunking
 
+        # These both need to be integers, not floats
+        chunksize = int(chunksize)
+        numchunks = int(numchunks)
+
         # the following loops are kind of unrolled - code duplication
         # should make things execute faster. Putting the if statements
         # into the loop would be less code, but ... slower
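
For context on the two casts above: under Python 3, `/` performs true division, so chunk arithmetic can produce floats, and `range()` refuses float arguments. A simplified sketch of the affected computation, with variable names mirroring the diff but the surrounding pool logic omitted:

def split_into_chunks(actual_count, chunksize):
    # On Python 3, actual_count / chunksize is a float even when it divides evenly.
    numchunks = actual_count / chunksize
    # These both need to be integers, not floats, or range(numchunks) raises TypeError.
    chunksize = int(chunksize)
    numchunks = int(numchunks)
    remainder = actual_count - (numchunks * chunksize)
    return numchunks, chunksize, remainder

numchunks, chunksize, remainder = split_into_chunks(10, 4)
for _ in range(numchunks):      # safe: numchunks is an int on both versions
    pass
assert (numchunks, chunksize, remainder) == (2, 4, 2)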

‎async/test/test_channel.py

Lines changed: 5 additions & 2 deletions

@@ -3,8 +3,11 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Channel testing"""
-from .lib import *
-from async.channel import *
+from .lib import TestBase
+from async.channel import (
+    CallbackChannelWriter, CallbackChannelReader, ChannelWriter, ChannelReader,
+    IteratorReader, mkchannel, ReadOnly
+)
 
 import time
 

‎async/test/test_performance.py

Lines changed: 2 additions & 2 deletions

@@ -38,7 +38,7 @@ def test_base(self):
                 if read_mode == 1:
                     mode_info = "read(1) * %i" % ni
                 # END mode info
-                fmt = "Threadcount=%%i: Produced %%i items using %s in %%i transformations in %%f s (%%f items / s)" % mode_info
+                fmt = "Threadcount=%%i: Produced %%i items using %s in %%i transformations in %%f s (%%f items / s)\n" % mode_info
                 reader = rcs[-1]
                 st = time.time()
                 if read_mode == 1:
@@ -49,7 +49,7 @@ def test_base(self):
                     assert len(reader.read(0)) == ni
                 # END handle read mode
                 elapsed = time.time() - st
-                print(fmt % (num_threads, ni, num_transformers, elapsed, ni / elapsed), file=sys.stderr)
+                sys.stderr.write(fmt % (num_threads, ni, num_transformers, elapsed, ni / elapsed))
             # END for each read-mode
         # END for each amount of processors
     # END for each thread count

‎async/test/test_pool.py

Lines changed: 31 additions & 32 deletions

@@ -23,7 +23,7 @@ class TestThreadPool(TestBase):
 
     def _assert_single_task(self, p, async=False):
         """Performs testing in a synchronized environment"""
-        print("Threadpool: Starting single task (async = %i) with %i threads" % (async, p.size()), file=sys.stderr)
+        sys.stderr.write("Threadpool: Starting single task (async = %i) with %i threads\n" % (async, p.size()))
         null_tasks = p.num_tasks()  # in case we had some before
 
         # add a simple task
@@ -71,10 +71,10 @@ def _assert_single_task(self, p, async=False):
             assert i == items[0]
         # END for each item
         elapsed = time.time() - st
-        print("Threadpool: processed %i individual items, with %i threads, one at a time, in %f s ( %f items / s )" % (ni, p.size(), elapsed, ni / elapsed), file=sys.stderr)
-
-        # it couldn't yet notice that the input is depleted as we pulled exaclty
-        # ni items - the next one would remove it. Instead, we delete our channel
+        sys.stderr.write("Threadpool: processed %i individual items, with %i threads, one at a time, in %f s ( %f items / s )\n" % (ni, p.size(), elapsed, ni / elapsed))
+
+        # it couldn't yet notice that the input is depleted as we pulled exaclty
+        # ni items - the next one would remove it. Instead, we delete our channel
         # which triggers orphan handling
         assert not task.is_done()
         assert p.num_tasks() == 1 + null_tasks
@@ -162,8 +162,8 @@ def _assert_single_task(self, p, async=False):
         # END pull individual items
         # too many processing counts ;)
         elapsed = time.time() - st
-        print("Threadpool: processed %i individual items in chunks of %i, with %i threads, one at a time, in %f s ( %f items / s )" % (ni, ni / 4, p.size(), elapsed, ni / elapsed), file=sys.stderr)
-
+        sys.stderr.write("Threadpool: processed %i individual items in chunks of %i, with %i threads, one at a time, in %f s ( %f items / s)\n" % (ni, ni / 4, p.size(), elapsed, ni / elapsed))
+
         task._assert(ni, ni)
         assert p.num_tasks() == 1 + null_tasks
         assert p.remove_task(task) is p  # del manually this time
@@ -207,17 +207,17 @@ def _assert_single_task(self, p, async=False):
         assert len(rc.read()) == nri
         assert task.is_done()
         assert isinstance(task.error(), AssertionError)
-
-        print("done with everything", file=sys.stderr)
-
-
-
+
+        sys.stderr.write("done with everything\n")
+
+
+
     def _assert_async_dependent_tasks(self, pool):
         # includes failure in center task, 'recursive' orphan cleanup
         # This will also verify that the channel-close mechanism works
         # t1 -> t2 -> t3
-
-        print("Threadpool: starting async dependency test in %i threads" % pool.size(), file=sys.stderr)
+
+        sys.stderr.write("Threadpool: starting async dependency test in %i threads\n" % pool.size())
         null_tasks = pool.num_tasks()
         ni = 1000
         count = 3
@@ -243,9 +243,9 @@ def _assert_async_dependent_tasks(self, pool):
         time.sleep(0.15)
         assert sys.getrefcount(ts[-1]) == 2  # ts + call
         assert sys.getrefcount(ts[0]) == 2   # ts + call
-        print("Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )" % (ni, aic, elapsed, ni / elapsed), file=sys.stderr)
-
-
+        sys.stderr.write("Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed, ni / elapsed))
+
+
         # read(1)
         #########
         ts, rcs = make_task()
@@ -257,9 +257,9 @@ def _assert_async_dependent_tasks(self, pool):
         elapsed_single = time.time() - st
         # another read yields nothing, its empty
         assert len(rcs[-1].read()) == 0
-        print("Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )" % (ni, aic, elapsed_single, ni / elapsed_single), file=sys.stderr)
-
-
+        sys.stderr.write("Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed_single, ni / elapsed_single))
+
+
         # read with min-count size
         ###########################
         # must be faster, as it will read ni / 4 chunks
@@ -277,8 +277,8 @@ def _assert_async_dependent_tasks(self, pool):
         elapsed_minsize = time.time() - st
         # its empty
         assert len(rcs[-1].read()) == 0
-        print("Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize), file=sys.stderr)
-
+        sys.stderr.write("Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )\n" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize))
+
         # it should have been a bit faster at least, and most of the time it is
         # Sometimes, its not, mainly because:
         # * The test tasks lock a lot, hence they slow down the system
@@ -319,10 +319,10 @@ def _assert_async_dependent_tasks(self, pool):
         items = p2rcs[-1].read()
         elapsed = time.time() - st
         assert len(items) == ni
-
-        print("Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )" % (ni, aic + aic - 1, elapsed, ni / elapsed), file=sys.stderr)
-
-
+
+        sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic - 1, elapsed, ni / elapsed))
+
+
         # loose the handles of the second pool to allow others to go as well
         del(p2rcs); del(p2ts)
         assert p2.num_tasks() == 0
@@ -344,9 +344,9 @@ def _assert_async_dependent_tasks(self, pool):
         # END for each item to get
         elapsed = time.time() - st
         del(reader)  # decrement refcount
-
-        print("Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )" % (ni, aic + aic - 1, elapsed, ni / elapsed), file=sys.stderr)
-
+
+        sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic - 1, elapsed, ni / elapsed))
+
         # another read is empty
         assert len(rcs[-1].read()) == 0
 
@@ -481,6 +481,5 @@ def test_base(self):
         # DEPENDENT TASK ASYNC MODE
         ###########################
         self._assert_async_dependent_tasks(p)
-
-        print("Done with everything", file=sys.stderr)
-
+
+        sys.stderr.write("Done with everything\n")

‎async/test/test_thread.py

Lines changed: 5 additions & 1 deletion

@@ -6,7 +6,11 @@
 """ Test thead classes and functions"""
 from .lib import *
 from async.thread import *
-from queue import Queue
+try:
+    from queue import Queue
+except ImportError:
+    from Queue import Queue
+
 import time
 
 class TestWorker(WorkerThread):

‎async/thread.py

Lines changed: 4 additions & 1 deletion

@@ -7,7 +7,10 @@
 __docformat__ = "restructuredtext"
 import threading
 import inspect
-import queue
+try:
+    import queue
+except ImportError:
+    import Queue as queue
 
 import sys
 
‎async/util.py

Lines changed: 7 additions & 6 deletions

@@ -7,17 +7,18 @@
 from threading import (
     Lock,
     _allocate_lock,
-    _sleep,
     _time,
-    )
+)
 
-from queue import (
-    Empty,
-    )
+try:
+    from queue import Empty
+except ImportError:
+    from Queue import Empty
 
 from collections import deque
 import sys
 import os
+import time
 
 #{ Routines
 
@@ -130,7 +131,7 @@ def wait(self, timeout=None):
                 # this makes 4 threads working as good as two, but of course
                 # it causes more frequent micro-sleeping
                 #delay = min(delay * 2, remaining, .05)
-                _sleep(delay)
+                time.sleep(delay)
             # END endless loop
             if not gotit:
                 try:
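
The `_sleep` call replaced above sits in an exponential-backoff polling loop. Reduced to a standalone function, and assuming `time.time()` plus a plain callable in place of the lock acquire used in the real `wait()`, the idea looks roughly like this:

import time

def wait_for(predicate, timeout):
    """Poll predicate() with growing delays until it is True or timeout passes."""
    endtime = time.time() + timeout
    delay = 0.0005                      # 500 us -> initial delay of 1 ms
    while True:
        if predicate():
            return True
        remaining = endtime - time.time()
        if remaining <= 0:
            return False
        # grow the delay, but never sleep past the deadline or longer than 50 ms
        delay = min(delay * 2, remaining, .05)
        time.sleep(delay)               # public replacement for threading._sleep

start = time.time()
assert wait_for(lambda: time.time() - start > 0.01, timeout=1.0)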
