---------------------------------------------------------------------------
_RemoteTraceback Traceback (most recent call last)
_RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\externals\loky\backend\queues.py", line 150, in _feed
obj_ = dumps(obj, reducers=reducers)
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\externals\loky\backend\reduction.py", line 243, in dumps
dump(obj, buf, reducers=reducers, protocol=protocol)
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\externals\loky\backend\reduction.py", line 236, in dump
_LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\externals\cloudpickle\cloudpickle.py", line 267, in dump
return Pickler.dump(self, obj)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 437, in dump
self.save(obj)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 662, in save_reduce
save(state)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 882, in _batch_setitems
save(v)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 662, in save_reduce
save(state)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 887, in _batch_setitems
save(v)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 662, in save_reduce
save(state)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 882, in _batch_setitems
save(v)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 816, in save_list
self._batch_appends(obj)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 843, in _batch_appends
save(tmp[0])
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 771, in save_tuple
save(element)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 786, in save_tuple
save(element)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 662, in save_reduce
save(state)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 882, in _batch_setitems
save(v)
File "C:\ProgramData\Anaconda3\lib\pickle.py", line 524, in save
rv = reduce(self.proto)
TypeError: can't pickle _thread._local objects
"""
The above exception was the direct cause of the following exception:
PicklingError Traceback (most recent call last)
<ipython-input-4-ac50ab686b4a> in <module>
2
3 estimator = px.processing.Cluster(h5_main, KMeans(n_clusters=num_clusters))
----> 4 h5_kmeans_grp = estimator.compute(h5_main)
5 h5_kmeans_labels = h5_kmeans_grp['Labels']
6 h5_kmeans_mean_resp = h5_kmeans_grp['Mean_Response']
C:\ProgramData\Anaconda3\lib\site-packages\pycroscopy\processing\cluster.py in compute(self, rearrange_clusters, override)
206 """
207 if self.__labels is None and self.__mean_resp is None:
--> 208 _ = self.test(rearrange_clusters=rearrange_clusters, override=override)
209
210 if self.h5_results_grp is None:
C:\ProgramData\Anaconda3\lib\site-packages\pycroscopy\processing\cluster.py in test(self, rearrange_clusters, override)
153
154 t1 = time.time()
--> 155 self.__mean_resp = self._get_mean_response(results.labels_)
156 print('Took {} to calculate mean response per cluster'.format(format_time(time.time() - t1)))
157
C:\ProgramData\Anaconda3\lib\site-packages\pycroscopy\processing\cluster.py in _get_mean_response(self, labels)
247 func_args=[self.h5_main, labels, self.data_slice,
248 self.data_transform_func], lengthy_computation=True,
--> 249 verbose=self.verbose))
250
251 return mean_resp
C:\ProgramData\Anaconda3\lib\site-packages\pyUSID\processing\comp_utils.py in parallel_compute(data, func, cores, lengthy_computation, func_args, func_kwargs, verbose)
153 if cores > 1:
154 values = [joblib.delayed(func)(x, *func_args, **func_kwargs) for x in data]
--> 155 results = joblib.Parallel(n_jobs=cores)(values)
156
157 # Finished reading the entire data set
C:\ProgramData\Anaconda3\lib\site-packages\joblib\parallel.py in __call__(self, iterable)
932
933 with self._backend.retrieval_context():
--> 934 self.retrieve()
935 # Make sure that we get a last message telling us we are done
936 elapsed_time = time.time() - self._start_time
C:\ProgramData\Anaconda3\lib\site-packages\joblib\parallel.py in retrieve(self)
831 try:
832 if getattr(self._backend, 'supports_timeout', False):
--> 833 self._output.extend(job.get(timeout=self.timeout))
834 else:
835 self._output.extend(job.get())
C:\ProgramData\Anaconda3\lib\site-packages\joblib\_parallel_backends.py in wrap_future_result(future, timeout)
519 AsyncResults.get from multiprocessing."""
520 try:
--> 521 return future.result(timeout=timeout)
522 except LokyTimeoutError:
523 raise TimeoutError()
C:\ProgramData\Anaconda3\lib\concurrent\futures\_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
C:\ProgramData\Anaconda3\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
PicklingError: Could not pickle the task to send it to the workers
Any input on how to resolve this PicklingError (raised from `estimator.compute(h5_main)` when joblib tries to send the task to worker processes) would be appreciated.
Thanks
Luc