Python 2.7.10 |Anaconda 2.4.0 (32-bit)| (default, Nov 7 2015, 13:29:20) [MSC v.
1500 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> import hddm
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Long\Anaconda2\lib\site-packages\hddm\__init__.py", line 7, in
<module>
import likelihoods
File "C:\Users\Long\Anaconda2\lib\site-packages\hddm\likelihoods.py", line 2,
in <module>
import pymc as pm
File "C:\Users\Long\Anaconda2\lib\site-packages\pymc\__init__.py", line 33, in
<module>
from .Model import *
File "C:\Users\Long\Anaconda2\lib\site-packages\pymc\Model.py", line 17, in <m
odule>
from . import database
File "C:\Users\Long\Anaconda2\lib\site-packages\pymc\database\__init__.py", li
ne 56, in <module>
from . import hdf5
File "C:\Users\Long\Anaconda2\lib\site-packages\pymc\database\hdf5.py", line 2
5, in <module>
import tables
File "C:\Users\Long\Anaconda2\lib\site-packages\tables\__init__.py", line 84,
in <module>
from tables.utilsextension import (
File "__init__.pxd", line 155, in init tables.utilsextension (tables\utilsexte
nsion.c:17736)
ValueError: numpy.dtype has the wrong size, try recompiling
Never mind. I figured out a way.
--
You received this message because you are subscribed to the Google Groups "hddm-users" group.
To unsubscribe from this group and stop receiving emails from it, send an email to hddm-users+...@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.
conda create --name py27_3 python=2.7.3
conda install -n py27_3 pymc==2.3.4 python=2.7.3
conda install -n py27_3 -c pymc hddm python=2.7.3 pymc=2.3.4
activate py27_3
python
import hddm
Note that the path setting is different in this environment from what the hddm README assumes. So to run the sample code on the example file simple_difficulty.csv, specify the full path for the data file.
data=hddm.load_csv('C:\Anaconda2\pkgs\hddm-0.5.5-np19py27_0\Lib\site-packages\hddm\examples\simple_difficulty.csv')
The other lines of sample code can be run as is.
--
--
conda create --name py27_3 python=2.7.3
conda install -n py27_3 pymc==2.3.4 python=2.7.3
conda install -n py27_3 -c pymc hddm python=2.7.3 pymc=2.3.4
--------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) RuntimeError: module compiled against API version a but this version of numpy is 9
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-4-045d55581b72> in <module>() ----> 1 import kabuki C:\Anaconda2\lib\site-packages\kabuki\__init__.py in <module>() ----> 1 from hierarchical import * 2 3 import utils 4 import analyze 5 import step_methods as steps C:\Anaconda2\lib\site-packages\kabuki\hierarchical.py in <module>() 11 12 import pandas as pd ---> 13 import pymc as pm 14 import warnings 15 C:\Anaconda2\lib\site-packages\pymc\__init__.pyc in <module>() 31 from .NumpyDeterministics import * 32 from .distributions import * ---> 33 from .Model import * 34 from .StepMethods import * 35 from .MCMC import * C:\Anaconda2\lib\site-packages\pymc\Model.py in <module>() 15 from numpy import zeros, floor 16 from numpy.random import randint ---> 17 from . import database 18 from .PyMCObjects import Stochastic, Deterministic, Node, Variable, Potential 19 from .Container import Container, ObjectContainer C:\Anaconda2\lib\site-packages\pymc\database\__init__.py in <module>() 54 55 try: ---> 56 from . import hdf5 57 except ImportError: 58 pass C:\Anaconda2\lib\site-packages\pymc\database\hdf5.py in <module>() 23 from pymc.database import base, pickle 24 from copy import copy ---> 25 import tables 26 import os 27 import warnings C:\Anaconda2\lib\site-packages\tables\__init__.py in <module>() 82 83 # Necessary imports to get versions stored on the cython extension ---> 84 from tables.utilsextension import ( 85 get_pytables_version, get_hdf5_version, blosc_compressor_list, 86 blosc_compcode_to_compname_ as blosc_compcode_to_compname, __init__.pxd in init tables.utilsextension (tables\utilsextension.c:17736)()
ValueError: numpy.dtype has the wrong size, try recompiling
m_vat = hddm.HDDMRegressor(data, {"v ~ C(stim, Treatment('congruent'))","a ~ C(stim, Treatment('congruent'))","t ~ C(stim, Treatment('congruent'))"}, group_only_regressors=False, keep_regressor_trace=True, include=['p_outlier'])
Adding these covariates:
['t_Intercept', "t_C(stim, Treatment('congruent'))[T.incongruent]", "t_C(stim, Treatment('congruent'))[T.sound]", "t_C(stim, Treatment('congruent'))[T.word]"]
Adding these covariates:
['a_Intercept', "a_C(stim, Treatment('congruent'))[T.incongruent]", "a_C(stim, Treatment('congruent'))[T.sound]", "a_C(stim, Treatment('congruent'))[T.word]"]
Adding these covariates:
['v_Intercept', "v_C(stim, Treatment('congruent'))[T.incongruent]", "v_C(stim, Treatment('congruent'))[T.sound]", "v_C(stim, Treatment('congruent'))[T.word]"]
--------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-17-d3878d2cd394> in <module>() ----> 1 m_vat = hddm.HDDMRegressor(data, {"v ~ C(stim, Treatment('congruent'))","a ~ C(stim, Treatment('congruent'))","t ~ C(stim, Treatment('congruent'))"}, group_only_regressors=False, keep_regressor_trace=True, include=['p_outlier']) C:\Anaconda2\lib\site-packages\hddm\models\hddm_regression.pyc in __init__(self, data, models, group_only_regressors, keep_regressor_trace, **kwargs) 201 self.wfpt_reg_class = deepcopy(wfpt_reg_like) 202 --> 203 super(HDDMRegressor, self).__init__(data, **kwargs) 204 205 # Sanity checks C:\Anaconda2\lib\site-packages\hddm\models\hddm_info.pyc in __init__(self, *args, **kwargs) 111 self.is_informative = kwargs.pop('informative', True) 112 --> 113 super(HDDM, self).__init__(*args, **kwargs) 114 115 def _create_stochastic_knodes(self, include): C:\Anaconda2\lib\site-packages\hddm\models\base.pyc in __init__(self, data, bias, include, wiener_params, p_outlier, **kwargs) 687 self.wfpt_class = hddm.likelihoods.generate_wfpt_stochastic_class(wp, cdf_range=self.cdf_range) 688 --> 689 super(HDDMBase, self).__init__(data, **kwargs) 690 691 def __getstate__(self): C:\Anaconda2\lib\site-packages\hddm\models\base.pyc in __init__(self, data, **kwargs) 38 self.std_depends = kwargs.pop('std_depends', False) 39 ---> 40 super(AccumulatorModel, self).__init__(data, **kwargs) 41 42 C:\Anaconda2\lib\site-packages\kabuki\hierarchical.pyc in __init__(self, data, is_group_model, depends_on, trace_subjs, plot_subjs, plot_var, group_only_nodes) 346 self.db = None 347 --> 348 self._setup_model() 349 350 def _setup_model(self): C:\Anaconda2\lib\site-packages\kabuki\hierarchical.pyc in _setup_model(self) 357 358 # constructs pymc nodes etc and connects them appropriately --> 359 self.create_model() 360 361 def __getstate__(self): C:\Anaconda2\lib\site-packages\kabuki\hierarchical.pyc in 
create_model(self, max_retries) 431 for tries in range(max_retries): 432 try: --> 433 _create() 434 except (pm.ZeroProbability, ValueError): 435 continue C:\Anaconda2\lib\site-packages\kabuki\hierarchical.pyc in _create() 427 def _create(): 428 for knode in self.knodes: --> 429 knode.create() 430 431 for tries in range(max_retries): C:\Anaconda2\lib\site-packages\kabuki\hierarchical.pyc in create(self) 166 kwargs['doc'] = node_name 167 --> 168 node = self.create_node(node_name, kwargs, grouped_data) 169 170 if node is not None: C:\Anaconda2\lib\site-packages\kabuki\hierarchical.pyc in create_node(self, node_name, kwargs, data) 174 def create_node(self, node_name, kwargs, data): 175 #actually create the node --> 176 return self.pymc_node(name=node_name, **kwargs) 177 178 def create_tag_and_subj_idx(self, cols, uniq_elem): C:\Anaconda2\lib\site-packages\pymc\distributions.pyc in __init__(self, *args, **kwds) 318 logp_partial_gradients=logp_partial_gradients, 319 dtype=dtype, --> 320 **arg_dict_out) 321 322 new_class.__name__ = name C:\Anaconda2\lib\site-packages\pymc\PyMCObjects.pyc in __init__(self, logp, doc, name, parents, random, trace, value, dtype, rseed, observed, cache_depth, plot, verbose, isdata, check_logp, logp_partial_gradients) 762 dtype=dtype, 763 plot=plot, --> 764 verbose=verbose) 765 766 # self._logp.force_compute() C:\Anaconda2\lib\site-packages\pymc\Node.pyc in __init__(self, doc, name, parents, cache_depth, trace, dtype, plot, verbose) 212 self.extended_children = set() 213 --> 214 Node.__init__(self, doc, name, parents, cache_depth, verbose=verbose) 215 216 if self.dtype is None: C:\Anaconda2\lib\site-packages\pymc\Node.pyc in __init__(self, doc, name, parents, cache_depth, verbose) 127 128 # Initialize --> 129 self.parents = parents 130 131 def _get_parents(self): C:\Anaconda2\lib\site-packages\pymc\Node.pyc in _set_parents(self, new_parents) 145 146 # Get new lazy function --> 147 self.gen_lazy_function() 148 149 parents = property( 
C:\Anaconda2\lib\site-packages\pymc\PyMCObjects.pyc in gen_lazy_function(self) 811 [self]), 812 cache_depth=self._cache_depth) --> 813 self._logp.force_compute() 814 815 self._logp_partial_gradients = {} LazyFunction.pyx in pymc.LazyFunction.LazyFunction.force_compute (pymc\LazyFunction.c:2409)() C:\Anaconda2\lib\site-packages\pymc\distributions.pyc in wrapper(**kwds) 2979 # Handle Pandas DataFrames 2980 value = getattr(value, 'values', value) -> 2981 return f(value, **kwds) 2982 2983 if arguments is None: C:\Anaconda2\lib\site-packages\hddm\models\hddm_regression.pyc in wiener_multi_like(value, v, sv, a, z, sz, t, st, reg_outcomes, p_outlier) 27 params = {'v': v, 'sv': sv, 'a': a, 'z': z, 'sz': sz, 't': t, 'st': st} 28 for reg_outcome in reg_outcomes: ---> 29 params[reg_outcome] = params[reg_outcome].ix[value['rt'].index].values 30 return hddm.wfpt.wiener_like_multi(value['rt'].values, 31 params['v'], params['sv'], params['a'], params['z'], IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
--
Hi, Tomas,
--
--