@@ -53,9 +53,8 @@ def __del__(self):
5353 # I can't explain, but appears to be normal in the destructor
5454 # On the caller side, getrefcount returns 2, as expected
5555 if sys .getrefcount (self ) < 6 :
56- print "__del__"
57- self ._pool .del_task (self ._task )
58- print "done"
56+ self ._pool .remove_task (self ._task )
57+ # END handle refcount based removal of task
5958
6059 def set_pre_cb (self , fun = lambda count : None ):
6160 """Install a callback to call with the item count to be read before any
@@ -237,12 +236,14 @@ def _prepare_channel_read(self, task, count):
237236 # the list includes our tasks - the first one to evaluate first, the
238237 # requested one last
239238 for task in dfirst_tasks :
240- if task .error () or task .is_done ():
239+ # if task.error() or task.is_done():
241240 # in theory, there should never be a consumed task in the pool, right ?
242- # They delete themselves once they are done.
243- # TODO: remove this check for performance later
244- raise AssertionError ("Shouldn't have consumed tasks on the pool, they delete themeselves, what happend ?" )
245- #continue
241+ # They delete themselves once they are done. But as we run asynchronously,
242+ # It can be that someone reads, while a task realizes it's done, and
243+ # we get here to prepare the read although it already is done.
244+ # It's not a problem though, the task will not do anything.
245+ # Hence we don't waste our time with checking for it
246+ # raise AssertionError("Shouldn't have consumed tasks on the pool, they delete themselves, what happened?")
246247 # END skip processing
247248
248249 # if the task does not have the required output on its queue, schedule
@@ -316,11 +317,11 @@ def _post_channel_read(self, task):
316317 """Called after we processed a read to cleanup"""
317318 pass
318319
319- def _del_task_if_orphaned (self , task ):
320+ def _remove_task_if_orphaned (self , task ):
320321 """Check the task, and delete it if it is orphaned"""
321322 # 1 as its stored on the task, 1 for the getrefcount call
322323 if sys .getrefcount (task ._out_wc ) < 3 :
323- self .del_task (task )
324+ self .remove_task (task )
324325 #} END internal
325326
326327 #{ Interface
@@ -351,7 +352,6 @@ def set_size(self, size=0):
351352 # Just adding more workers is not a problem at all.
352353 add_count = size - cur_count
353354 for i in range (add_count ):
354- print "Add worker"
355355 self .WorkerCls (self ._queue ).start ()
356356 # END for each new worker to create
357357 self ._num_workers += add_count
@@ -361,7 +361,6 @@ def set_size(self, size=0):
361361 # could be added as we speak.
362362 del_count = cur_count - size
363363 for i in range (del_count ):
364- print "stop worker"
365364 self ._queue .put ((self .WorkerCls .stop , True )) # arg doesnt matter
366365 # END for each thread to stop
367366 self ._num_workers -= del_count
@@ -390,7 +389,7 @@ def num_tasks(self):
390389 finally :
391390 self ._taskgraph_lock .release ()
392391
393- def del_task (self , task ):
392+ def remove_task (self , task ):
394393 """Delete the task
395394 Additionally we will remove orphaned tasks, which can be identified if their
396395 output channel is only held by themselves, so no one will ever consume
@@ -399,15 +398,14 @@ def del_task(self, task):
399398 This method blocks until all tasks to be removed have been processed, if
400399 they are currently being processed.
401400 :return: self"""
402- print "del_task: getting lock"
403401 self ._taskgraph_lock .acquire ()
404402 try :
405403 # it can be that the task is already deleted, but its chunk was on the
406404 # queue until now, so its marked consumed again
407405 if not task in self ._tasks .nodes :
408406 return self
409407 # END early abort
410- print "deleting " , id ( task )
408+
411409 # the task we are currently deleting could also be processed by
412410 # a thread right now. We don't care about it as its taking care about
413411 # its write channel itself, and sends everything it can to it.
@@ -426,7 +424,7 @@ def del_task(self, task):
426424 # END locked deletion
427425
428426 for t in in_tasks :
429- self ._del_task_if_orphaned (t )
427+ self ._remove_task_if_orphaned (t )
430428 # END handle orphans recursively
431429
432430 return self
0 commit comments