Fix parallel in case no documents are remaining
ubmarco committed Oct 11, 2023
1 parent b1b5abf commit 6a21ee9
Showing 1 changed file with 44 additions and 42 deletions.
86 changes: 44 additions & 42 deletions sphinx/builders/__init__.py
@@ -653,48 +653,50 @@ def on_chunk_done(args: list[tuple[str, NoneType]], result: NoneType) -> None:
             next(progress)
 
         self.app.phase = BuildPhase.RESOLVING
-        measure_1 = f'{__file__}: {inspect.currentframe().f_code.co_name}: get_doctree_write'
-        measure_2 = f'{__file__}: {inspect.currentframe().f_code.co_name}: write_doc_serialized'
-        for docname in docnames:
-            with TimeIt(measure_1, self.runtime, logger, False):
-                doctree = self.env.get_doctree_write(docname)
-            with TimeIt(measure_2, self.runtime, logger, False):
-                self.write_doc_serialized(docname, doctree)
-        logger.info(f'{measure_1}: {self.runtime[measure_1]}')
-        logger.info(f'{measure_2}: {self.runtime[measure_2]}')
-
-        with WorkerPool(n_jobs=nproc, start_method='fork', use_dill=False, pass_worker_id=True, enable_insights=True) as pool:
-            # args = []
-            # for docname in docnames:
-            #     doctree = self.env.get_and_resolve_doctree(docname, self)
-            #     self.write_doc_serialized(docname, doctree)
-            #     args.append([(docname, doctree)])
-            # import pickle
-
-            # from dill import dumps, loads
-            # doctree_dilled = dumps(args[0][0][1], protocol=pickle.HIGHEST_PROTOCOL)
-            # doctree_readback = loads(doctree_dilled)
-            with TimeIt(f'{__file__}: {inspect.currentframe().f_code.co_name}: WorkerPool', self.runtime, logger):
-                results = pool.map(write_process, docnames, worker_lifespan=5, progress_bar=True)
-                for result in results:
-                    warnings, images = result
-                    for warning in warnings:
-                        logger.warning(warning)
-                    self.images.update(images)
-            insights = pool.get_insights()
-            import json
-            print(json.dumps(insights, indent=2))
-            # print(insights)
-            # for chunk in chunks:
-            #     arg = []
-            #     for docname in chunk:
-            #         doctree = self.env.get_and_resolve_doctree(docname, self)
-            #         self.write_doc_serialized(docname, doctree)
-            #         arg.append((docname, doctree))
-            #     tasks.add_task(write_process, arg, on_chunk_done)
-
-            # # make sure all threads have finished
-            # tasks.join()
+        if docnames:
+            measure_1 = f'{__file__}: {inspect.currentframe().f_code.co_name}: get_doctree_write'
+            measure_2 = f'{__file__}: {inspect.currentframe().f_code.co_name}: write_doc_serialized'
+            for docname in docnames:
+                with TimeIt(measure_1, self.runtime, logger, False):
+                    doctree = self.env.get_doctree_write(docname)
+                with TimeIt(measure_2, self.runtime, logger, False):
+                    self.write_doc_serialized(docname, doctree)
+            logger.info(f'{measure_1}: {self.runtime[measure_1]}')
+            logger.info(f'{measure_2}: {self.runtime[measure_2]}')
+
+            with WorkerPool(n_jobs=nproc, start_method='fork', use_dill=False, pass_worker_id=True, enable_insights=True) as pool:
+                # args = []
+                # for docname in docnames:
+                #     doctree = self.env.get_and_resolve_doctree(docname, self)
+                #     self.write_doc_serialized(docname, doctree)
+                #     args.append([(docname, doctree)])
+                # import pickle
+
+                # from dill import dumps, loads
+                # doctree_dilled = dumps(args[0][0][1], protocol=pickle.HIGHEST_PROTOCOL)
+                # doctree_readback = loads(doctree_dilled)
+                with TimeIt(f'{__file__}: {inspect.currentframe().f_code.co_name}: WorkerPool', self.runtime, logger):
+                    results = pool.map(write_process, docnames, worker_lifespan=5, progress_bar=True)
+                    for result in results:
+                        warnings, images = result
+                        for warning in warnings:
+                            logger.warning(warning)
+                        self.images.update(images)
+                insights = pool.get_insights()
+                import json
+                print(json.dumps(insights, indent=2))
+                # print(insights)
+                # for chunk in chunks:
+                #     arg = []
+                #     for docname in chunk:
+                #         doctree = self.env.get_and_resolve_doctree(docname, self)
+                #         self.write_doc_serialized(docname, doctree)
+                #         arg.append((docname, doctree))
+                #     tasks.add_task(write_process, arg, on_chunk_done)
+
+                # # make sure all threads have finished
+                # tasks.join()
         import json
         print(json.dumps(self.runtime, indent=2))
         logger.info('')
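
The gist of the change: the per-document serialization loop and the mpire WorkerPool are now only set up when docnames is non-empty, so an incremental build with nothing left to write no longer forks worker processes for an empty job list. Below is a minimal standalone sketch of that guard pattern, not the commit's code; write_parallel and render_one are hypothetical stand-ins, while WorkerPool and pool.map are the mpire calls already used in the diff above.

    # Minimal sketch of the guard introduced by this commit (assumptions noted above).
    from mpire import WorkerPool


    def render_one(docname: str) -> str:
        # placeholder for the real per-document write work
        return f'wrote {docname}'


    def write_parallel(docnames: list[str], nproc: int) -> list[str]:
        if not docnames:
            # nothing left to write: skip pool creation instead of forking
            # worker processes for an empty job list
            return []
        with WorkerPool(n_jobs=nproc, start_method='fork') as pool:  # 'fork' as in the diff (POSIX only)
            return pool.map(render_one, docnames, progress_bar=True)


    if __name__ == '__main__':
        print(write_parallel([], nproc=4))                  # -> []
        print(write_parallel(['index', 'usage'], nproc=2))  # -> ['wrote index', 'wrote usage']

Guarding before entering the pool keeps the no-work case cheap: no processes are forked, no empty progress bar is drawn, and no insights are collected for zero tasks.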

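The hunk also relies on a TimeIt helper and a self.runtime dict, neither of which is shown in this diff. Purely as an assumption about its shape, inferred from how it is called above (TimeIt(name, self.runtime, logger, False), with self.runtime[name] read back afterwards and dumped as JSON), it could be a context manager that accumulates elapsed seconds per measurement name:

    # Hypothetical sketch only: the real TimeIt in this branch is not part of the diff.
    import time


    class TimeIt:
        def __init__(self, name, runtime, logger=None, log_each=True):
            self.name = name
            self.runtime = runtime  # dict accumulating seconds per measurement name
            self.logger = logger
            self.log_each = log_each

        def __enter__(self):
            self.start = time.perf_counter()
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            elapsed = time.perf_counter() - self.start
            self.runtime[self.name] = self.runtime.get(self.name, 0.0) + elapsed
            if self.logger is not None and self.log_each:
                self.logger.info(f'{self.name}: {elapsed:.3f}s')
            return False  # never swallow exceptions

Under this reading, the fourth positional argument passed in the diff (False) would suppress per-measurement logging, with the accumulated totals logged or JSON-dumped once at the end instead.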
