Description
Loading meshes from the L2/3 pinky dataset hangs for a long time, then fails with an SSL max-retries error.
from meshparty import trimesh_io

mesh_dir = 'data/meshes/'
seg_source = "precomputed://gs://microns_public_datasets/pinky100_v185/seg"
seg_id = 648518346349539076

mm = trimesh_io.MeshMeta(cv_path=seg_source,
                         disk_cache_path=mesh_dir,
                         cache_size=20,
                         )
downloadmesh = mm.mesh(seg_id=seg_id, remove_duplicate_vertices=True,
                       force_download=True)
This fails with the following error:
RemoteTraceback Traceback (most recent call last)
RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connectionpool.py", line 467, in _make_request
self._validate_conn(conn)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connectionpool.py", line 1096, in _validate_conn
conn.connect()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connection.py", line 642, in connect
sock_and_verified = _ssl_wrap_socket_and_match_hostname(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connection.py", line 782, in _ssl_wrap_socket_and_match_hostname
ssl_sock = ssl_wrap_socket(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\util\ssl_.py", line 470, in ssl_wrap_socket
ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\util\ssl_.py", line 514, in _ssl_wrap_socket_impl
return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\ssl.py", line 500, in wrap_socket
return self.sslsocket_class._create(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\ssl.py", line 1040, in _create
self.do_handshake()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\ssl.py", line 1309, in do_handshake
self._sslobj.do_handshake()
ssl.SSLZeroReturnError: TLS/SSL connection has been closed (EOF) (_ssl.c:1131)

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connectionpool.py", line 790, in urlopen
response = self._make_request(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connectionpool.py", line 491, in _make_request
raise new_e
urllib3.exceptions.SSLError: TLS/SSL connection has been closed (EOF) (_ssl.c:1131)

The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\requests\adapters.py", line 486, in send
resp = conn.urlopen(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\connectionpool.py", line 844, in urlopen
retries = retries.increment(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\urllib3\util\retry.py", line 515, in increment
raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='storage.googleapis.com', port=443): Max retries exceeded with url: /microns_public_datasets/pinky100_v185/seg/mesh_mip_2_err_40/648518346349539076:0:5824-6272_8640-9088_1344-1792 (Caused by SSLError(SSLZeroReturnError(6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1131)')))

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\multiprocess\pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\pathos\helpers\mp_helper.py", line 15, in
func = lambda args: f(*args)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\cloudfiles.py", line 460, in get
results = schedule_jobs(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\scheduler.py", line 144, in schedule_jobs
return schedule_threaded_jobs(fns, concurrency, progress, total, count_return)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\scheduler.py", line 40, in schedule_threaded_jobs
tq.put(updatefn(fn))
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\threaded_queue.py", line 254, in exit
self.wait(progress=self.with_progress)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\threaded_queue.py", line 224, in wait
self._check_errors()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\threaded_queue.py", line 188, in _check_errors
raise err
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\threaded_queue.py", line 150, in _consume_queue
self._consume_queue_execution(fn)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\threaded_queue.py", line 177, in _consume_queue_execution
fn()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\scheduler.py", line 33, in realupdatefn
res = fn()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\cloudfiles.py", line 433, in download
raise error
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\cloudfiles.py", line 416, in download
content, encoding, server_hash, server_hash_type = conn.get_file(
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\tenacity_init_.py", line 289, in wrapped_f
return self(f, *args, **kw)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\tenacity_init_.py", line 379, in call
do = self.iter(retry_state=retry_state)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\tenacity_init_.py", line 325, in iter
raise retry_exc.reraise()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\tenacity_init_.py", line 158, in reraise
raise self.last_attempt.result()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\concurrent\futures_base.py", line 437, in result
return self.__get_result()
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\concurrent\futures_base.py", line 389, in __get_result
raise self._exception
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\tenacity_init_.py", line 382, in call
result = fn(*args, **kwargs)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\interfaces.py", line 741, in get_file
resp = self.session.get(key)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\requests\sessions.py", line 602, in get
return self.request("GET", url, **kwargs)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\requests\sessions.py", line 589, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\requests\sessions.py", line 703, in send
r = adapter.send(request, **kwargs)
File "C:\Users\bethanny.danskin\anaconda3\envs\microns2023\lib\site-packages\requests\adapters.py", line 517, in send
raise SSLError(e, request=request)
requests.exceptions.SSLError: HTTPSConnectionPool(host='storage.googleapis.com', port=443): Max retries exceeded with url: /microns_public_datasets/pinky100_v185/seg/mesh_mip_2_err_40/648518346349539076:0:5824-6272_8640-9088_1344-1792 (Caused by SSLError(SSLZeroReturnError(6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1131)')))
"""The above exception was the direct cause of the following exception:
SSLError Traceback (most recent call last)
File :11

File ~\anaconda3\envs\microns2023\lib\site-packages\meshparty\trimesh_io.py:648, in MeshMeta.mesh(self, filename, seg_id, cache_mesh, merge_large_components, stitch_mesh_chunks, overwrite_merge_large_components, remove_duplicate_vertices, force_download, lod, voxel_scaling)
646 cv_mesh_d = self.cv.mesh.get(seg_id, lod=lod)
647 else:
--> 648 cv_mesh_d = self.cv.mesh.get(
649 seg_id, remove_duplicate_vertices=remove_duplicate_vertices)
650 if isinstance(cv_mesh_d, (dict, collections.defaultdict)):
651 cv_mesh = cv_mesh_d[seg_id]

File ~\anaconda3\envs\microns2023\lib\site-packages\cloudvolume\datasource\precomputed\mesh\unsharded.py:165, in UnshardedLegacyPrecomputedMeshSource.get(self, segids, remove_duplicate_vertices, fuse, chunk_size)
163 for path in paths:
164 path_id_map[path] = segid
--> 165 fragments = self._get_mesh_fragments(path_id_map)
167 # decode all the fragments
168 meshdata = defaultdict(list)

File ~\anaconda3\envs\microns2023\lib\site-packages\cloudvolume\datasource\precomputed\mesh\unsharded.py:94, in UnshardedLegacyPrecomputedMeshSource._get_mesh_fragments(self, path_id_map)
91 if compress is None:
92 compress = True
---> 94 fragments = self.cache.download(paths, compress=compress)
95 fragments = [
96 (filename, content, path_id_map[os.path.basename(filename)])
97 for filename, content in fragments.items()
98 ]
99 fragments = sorted(fragments, key=lambda frag: frag[0]) # make decoding deterministic

File ~\anaconda3\envs\microns2023\lib\site-packages\cloudvolume\cacheservice.py:509, in CacheService.download(self, paths, compress, progress)
497 fragments = {
498 "/".join(key.split('\')): val
499 for key,val in fragments.items()
500 }
502 cf = CloudFiles(
503 self.meta.cloudpath,
504 progress=progress,
(...)
507 locking=self.config.cache_locking,
508 )
--> 509 remote_fragments = cf.get(locs['remote'], raw=True)
511 for frag in remote_fragments:
512 if frag['error'] is not None:

File ~\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\cloudfiles.py:111, in parallelize.<locals>.decor.<locals>.inner_decor(*args, **kwargs)
108 else:
109 fn = partial(fn, self, **params)
--> 111 return parallel_execute(
112 fn, input_value, parallel, total, progress,
113 desc=desc, returns_list=returns_list
114 )

File ~\anaconda3\envs\microns2023\lib\site-packages\cloudfiles\cloudfiles.py:155, in parallel_execute(fn, inputs, parallel, total, progress, desc, returns_list)
153 try:
154 with pathos.pools.ProcessPool(parallel) as executor:
--> 155 for res in executor.imap(fn, sip(inputs, block_size)):
156 if isinstance(res, int):
157 pbar.update(res)

File ~\anaconda3\envs\microns2023\lib\site-packages\multiprocess\pool.py:868, in IMapIterator.next(self, timeout)
866 if success:
867 return value
--> 868 raise value

SSLError: None: Max retries exceeded with url: /microns_public_datasets/pinky100_v185/seg/mesh_mip_2_err_40/648518346349539076:0:5824-6272_8640-9088_1344-1792 (Caused by None)
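The failing fragment URL can also be fetched directly, outside any worker pool; a minimal check (the URL is copied verbatim from the error above):

import requests

# Fetch the exact fragment that fails inside the worker pool.
url = ("https://storage.googleapis.com/microns_public_datasets/"
       "pinky100_v185/seg/mesh_mip_2_err_40/"
       "648518346349539076:0:5824-6272_8640-9088_1344-1792")
resp = requests.get(url)
print(resp.status_code, len(resp.content))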
However, using cloudvolume with the same path and segment id succeeds:
import cloudvolume
seg_source = "precomputed://gs://microns_public_datasets/pinky100_v185/seg"
seg_id = 648518346349539076
cv_pinky = cloudvolume.CloudVolume(seg_source, use_https=True)
cv_mesh = cv_pinky.mesh.get(seg_id, remove_duplicate_vertices=True)
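As a stopgap, the CloudVolume download can be wrapped into a meshparty mesh by hand; a minimal sketch, assuming mesh.get returns a {segid: mesh} mapping as in recent cloudvolume versions:

from meshparty import trimesh_io

# Build the meshparty Mesh directly from the CloudVolume result,
# bypassing MeshMeta's failing parallel fragment download.
cv_mesh_d = cv_pinky.mesh.get(seg_id, remove_duplicate_vertices=True)
cv_mesh = cv_mesh_d[seg_id] if isinstance(cv_mesh_d, dict) else cv_mesh_d
mp_mesh = trimesh_io.Mesh(vertices=cv_mesh.vertices, faces=cv_mesh.faces)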
A trimesh_io download from a different data source ('precomputed://gs://iarpa_microns/minnie/minnie65/seg_m943') also succeeds.
I couldn't identify the root cause, but the traceback points deep into the parallelized fragment download that MeshParty triggers through cloudvolume and cloudfiles (a pathos ProcessPool), and it only fails for this data source.
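One way to test that would be to fetch the same fragment through cloudfiles directly while toggling process-level parallelism; a sketch, assuming CloudFiles accepts use_https and parallel keywords and returns per-file dicts with an 'error' key:

from cloudfiles import CloudFiles

frag = "mesh_mip_2_err_40/648518346349539076:0:5824-6272_8640-9088_1344-1792"

# parallel=1 stays in-process; parallel=2 should exercise the
# pathos ProcessPool branch shown in the traceback above.
for parallel in (1, 2):
    cf = CloudFiles("gs://microns_public_datasets/pinky100_v185/seg",
                    use_https=True, parallel=parallel)
    results = cf.get([frag])  # list input engages the parallel code path
    print(parallel, [r['error'] for r in results])

If only the parallel path fails, the problem is likely in how SSL sessions survive being forked/spawned into the worker processes on Windows.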