[pyrwi] r7 committed - Add code so that nidaqmx.i can be re-built.

11 views
Skip to first unread message

codesite...@google.com

unread,
Oct 24, 2009, 5:42:35 PM10/24/09
to pyrwi...@googlegroups.com
Revision: 7
Author: danielrairigh
Date: Sat Oct 24 14:41:48 2009
Log: Add code so that nidaqmx.i can be re-built.
http://code.google.com/p/pyrwi/source/detail?r=7

Added:
/trunk/nidaqmx/genInterface.py
Modified:
/trunk/nidaqmx/nidaqmx_template.i

=======================================
--- /dev/null
+++ /trunk/nidaqmx/genInterface.py Sat Oct 24 14:41:48 2009
@@ -0,0 +1,422 @@
+'''
+genInterface.py
+
+This script automatically generates portions of the nidaqmx
+SWIG interface
+
+Usage:
+python genInterface.py C:\Program Files\National Instruments\NI-DAQ\DAQmx
ANSI C Dev\include\NIDAQmx.h
+'''
+
+#TODO:
+# - Remove "typedef void* TaskHandle;"
+# - Change docstring for all funcs modified by typemap
+# - Omit docstrings on constants
+
+import sys
+import os.path
+import re
+import textwrap
+import warnings
+
+
def buildConstFile(parser, clobber=False):
    '''
    Generate nidaqmx_const.i from the parsed header.

    Copies nidaqmx_const_template.i verbatim, then appends every top-level
    section whose title contains one of the constant-related keywords.

    @param parser : CodeParser instance holding the parsed header data
    @param clobber: OPTIONAL overwrite an existing nidaqmx_const.i
    @return : list of section keys written (caller can skip them later)
    @raise IOError: if the output file exists and clobber is False
    '''
    path = 'nidaqmx_const.i'
    template_path = 'nidaqmx_const_template.i'
    #Top level sections that contain these keywords will be output into
    #nidaqmx_const.i
    ConstSections = ('Values', 'NI-DAQmx Attributes', 'Error Codes')
    sec_list = []

    if not clobber and os.path.exists(path):
        raise IOError('The file %s already exists.' % path)

    #Use context managers so the files are closed even if writing fails
    with open(path, mode='w') as fout:
        #Copy the template in
        with open(template_path, mode='r') as fin:
            for l in fin:
                fout.write(l)

        #Find constant sections and write them (first match per keyword)
        for k in ConstSections:
            for sec in parser.data.keys():
                if sec.find(k) >= 0:
                    sec_list.append(sec)
                    fout.write(parser.fmtData(sec) + "\n\n")
                    break

    return sec_list
+
def buildMainFile(parser, usedKeys, clobber=False):
    '''
    Generate nidaqmx.i from the parsed header.

    Copies nidaqmx_template.i verbatim, then appends every top-level section
    that was not already written to the constants file, preserving the order
    in which the sections appeared in the header.

    @param parser : CodeParser instance holding the parsed header data
    @param usedKeys: section keys already written by buildConstFile()
    @param clobber : OPTIONAL overwrite an existing nidaqmx.i
    @raise IOError : if the output file exists and clobber is False
    '''
    path = 'nidaqmx.i'
    template_path = 'nidaqmx_template.i'

    if not clobber and os.path.exists(path):
        raise IOError('The file %s already exists.' % path)

    #Use context managers so the files are closed even if writing fails
    with open(path, mode='w') as fout:
        #Copy the template in
        with open(template_path, mode='r') as fin:
            for l in fin:
                fout.write(l)

        #Write every section that the constants file did not consume
        for sec in parser.top_sec_order:
            if sec not in usedKeys:
                fout.write(parser.fmtData(sec) + "\n\n")
+
+
class ParseError(Exception):
    '''Exception raised when the NIDAQmx header cannot be parsed.'''

    def __init__(self, msg):
        #Pass msg through to Exception so args/repr/pickling behave normally
        super(ParseError, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return str(self.msg)
+
class CodeParser:
    '''
    Reads in the NIDAQmx.h header file and parses it into sections.
    This class also applies any necessary code transformations.

    Parsed lines end up in self.data, a three-level dict keyed as
    data[section][subsection][sub-subsection] -> list of lines, with
    'base' used at any level that has no named section.
    '''

    class StateInfo:
        '''Describes one parser state: start/stop patterns and a watchdog limit.'''
        def __init__(self, state, start, stop, watch=3):
            self.state = state
            self.re_start = start
            self.re_stop = stop
            self.watch_dog = watch  #Max lines the state may run (<= 0 disables)

    #This tuple is faster to iterate over than a dict
    States = (
        StateInfo('section', re.compile(r"/\*{78}"), re.compile(r"\*/")),
        StateInfo('subsection', re.compile(r"/\*{54}"), re.compile(r"/\*{54}")),
        StateInfo('subsub', re.compile(r"//\*{2,} \w[^\*]+"),
                  re.compile(r"^(?!//)"), watch=20),
        StateInfo('precompile', re.compile(r"^\#([^ ]+)(\s+[^ ]+)?"),
                  re.compile(r".")),  #One line state
    )

    class Section:
        '''Linked section node. Currently unused; kept for a future tree rewrite.'''
        def __init__(self, title, parent=None):
            self.title = title
            self.lines = []
            self.children = []    #List of sub-sections
            self.parent = parent  #Pointer to parent

        def __getitem__(self, index):
            return self.children[index]

    #Matches DAQmx-prefixed identifiers; group 2 is the name without the prefix
    RE_NAME = re.compile(r'(DAQmx_?)(\w+)')

    def __init__(self, header_path=None):
        '''
        @param header_path: OPTIONAL path to NIDAQmx.h (defaults to the
                            standard NI-DAQmx installation location)
        @raise IOError: if the header file does not exist
        '''
        if header_path is not None:
            self.header_path = header_path
        else:
            #BUG FIX: raw string -- "\N..." is an invalid escape in Python 3
            self.header_path = (r"C:\Program Files\National Instruments"
                                r"\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h")
        if not os.path.exists(self.header_path):
            raise IOError("File '%s' not found" % self.header_path)

        #Will be re-used often to format comments
        self.wrapper = textwrap.TextWrapper(width=70)

        self.data = {}
        self.top_sec_order = []  #Top-level sections, in header order
        #There is a better way: instead of a dict I could build a tree of
        #doubly linked Section nodes (see self.Section above), but not now.
        self.defines = {}  #Track defines, so I can omit dups

        self.parseHeader()

    def parseHeader_precompile(self, l, directive, arg, l_cnt):
        """
        Used by self.parseHeader() to handle preprocessor statements.

        @param l : the full (stripped) source line
        @param directive: preprocessor keyword, e.g. 'define' or 'ifdef'
        @param arg : remainder of the directive line (may be None)
        @param l_cnt : current line number, for error reporting
        @raise ParseError: on unbalanced #endif nesting
        """
        if arg:
            arg = arg.strip()

        if directive == "ifndef":
            #Remove just the #ifndef and matching #define & #endif
            self.banned_if.append(self.pre_if)
            self.banned_define.append(arg)
            self.pre_if += 1  #The next pre_if will be this number

        elif directive == "ifdef":
            if arg in ["__cplusplus", "__linux__"]:
                #Remove everything inside these blocks
                self.banned_if.append(self.pre_if)
                self.block_storeLine = self.pre_if
            else:
                self.storeLine(l)
            self.pre_if += 1  #The next pre_if will be this number

        elif directive == "if":
            self.storeLine(l)
            self.pre_if += 1  #The next pre_if will be this number

        elif directive == "endif":
            self.pre_if -= 1  #The next #if will be at this level
            if self.pre_if < 0:
                raise ParseError('#if nesting error. Line %d' % l_cnt)

            if len(self.banned_if) > 0 and self.pre_if == self.banned_if[-1]:
                self.banned_if.pop()
                if self.pre_if == self.block_storeLine:
                    self.block_storeLine = False
            else:
                self.storeLine(l)

        elif directive == "define":
            #TODO: merge banned_define and self.defines
            #BUG FIX: dict.has_key() is gone in Python 3 -- use 'in'
            if arg not in self.banned_define and arg not in self.defines:
                self.defines[arg] = 1  #Don't duplicate defines
                self.storeLine(l)

        else:
            #Any other directive is passed through unchanged
            self.storeLine(l)

    def parseHeader(self):
        '''
        Parse the header file line by line, driving the state machine
        defined by self.States and storing the results via storeLine().
        @raise ParseError: when a state runs past its watchdog limit or
                           #if nesting is unbalanced
        '''
        #--- Init parsing variables
        #BUG FIX: cur_state is now an attribute so storeLine() can see it
        #(the original kept it as a local and then read self.cur_state)
        self.cur_state = None  #Searching for something to trigger a state
        watch = 0              #Watchdog counter for the active state
        l_cnt = 0
        self.setSection(sec='base')  #Init all section levels to 'base'
        self.pre_if = 0          #Count preprocessor IF nesting
        self.banned_if = []      #If's that must have their #endif removed
        self.banned_define = []  #Defines that I don't want
        self.block_storeLine = False

        re_stop = None  #Stop pattern of the active state
        txt = ''        #Accumulated title text of the active state
        pats = None
        match = None

        #--- Parse line by line (with-block closes the file on any error)
        with open(self.header_path, 'r') as fin:
            for l in fin:
                l = l.strip()
                l_cnt += 1

                #"for i" allows me to repeat the current line if parsing shows
                #that the line belongs to some state other than cur_state,
                #such as subsub's end condition
                for i in range(2):

                    #Check for a change in state
                    if self.cur_state is None:
                        for pats in self.States:
                            match = pats.re_start.search(l)
                            if match:
                                self.cur_state = pats.state
                                re_stop = pats.re_stop
                                txt = ''
                                watch = 0
                                break

                    if self.cur_state is None:
                        self.storeLine(l)
                    else:  #Process line for current state
                        if self.cur_state in ('section', 'subsection', 'subsub'):
                            if watch > 0 and re_stop.search(l):  #Don't stop on 1st line
                                if self.cur_state == 'section':
                                    self.setSection(sec=txt.strip())  #New top level section
                                elif self.cur_state == 'subsection':
                                    self.setSection(sub=txt.strip())  #New subsection
                                else:
                                    self.setSection(subsub=txt.strip())  #New sub-subsection
                                    self.cur_state = None
                                    #subsub's stop line is real code: re-parse it
                                    continue
                                self.cur_state = None
                            else:
                                #Final line of each section will not be included
                                txt += l.strip('/* ') + ' '  #Remove commenting text

                        elif self.cur_state == "precompile":
                            (directive, arg) = match.groups()
                            self.parseHeader_precompile(l, directive, arg, l_cnt)
                            self.cur_state = None

                        else:
                            raise ParseError('%s is an unknown state'
                                             % self.cur_state)

                        watch += 1
                        #Watchdog fires only while a state is still active
                        #(guards against a missing stop pattern)
                        if (self.cur_state is not None and pats.watch_dog > 0
                                and watch > pats.watch_dog):
                            raise ParseError(
                                'The current %s ran for too long (%d). Line %d'
                                % (self.cur_state, watch, l_cnt))

                    break  #out of "for i"

        if self.pre_if != 0:
            #BUG FIX: original referenced an undefined local 'pre_if' here
            warnings.warn('Precompiler IF blocks are not all closed. Ended '
                          'with a nesting level of %d' % self.pre_if)

    def storeLine(self, l):
        '''
        Store a line of code read by the parser. Any universal code
        clean up goes here:
          - output TaskHandle pointers become *OUTPUT typemap arguments
          - DAQmx-prefixed names get a %rename stripping the prefix
          - trailing // comments become SWIG docstrings (except on defines)
        '''
        if self.block_storeLine:
            return  #Currently skipping code

        #--- Separate comments and code
        i = l.find('//')
        if i < 0:
            code = l
            com = ''
        else:
            code = l[:i]
            com = l[i + 2:]

        #--- Apply code changes that are universal
        #Label output TaskHandles properly
        code = code.replace('TaskHandle *taskHandle', 'TaskHandle *OUTPUT')

        m = self.RE_NAME.search(l)
        if m:
            name = m.groups()[1]
            fullName = m.group()
            l = ''
            #Remove name prefix
            l += '%%rename(%s) %s;\n' % (name, fullName)
            #Turn comments into doc strings (not for preprocessor lines)
            if com and self.cur_state != 'precompile':
                l += '%%feature("docstring", "%s") %s;\n' \
                     % (com.replace('"', '\\"'), fullName)
            #Add actual code
            l += code
        else:
            l = code
            if com:
                l += '//' + com

        #Store this line
        self.data[self.cur_sec][self.cur_subsec][self.cur_subsub].append(l)

    def setSection(self, sec=None, sub=None, subsub=None):
        '''
        Section changes cascade down; this handles the bookkeeping and
        creates any missing keys in self.data.
        @param sec : OPTIONAL new top-level section name
        @param sub : OPTIONAL new subsection name
        @param subsub: OPTIONAL new sub-subsection name
        @return: the most significant level that changed (None if no args)
        '''
        rtn = None  #Guard against UnboundLocalError when called with no args
        if sec is not None:
            self.cur_sec = sec
            self.cur_subsec = sub or 'base'
            self.cur_subsub = subsub or 'base'
            if sec not in self.data:
                self.top_sec_order.append(sec)
            rtn = sec
        elif sub is not None:
            self.cur_subsec = sub
            self.cur_subsub = subsub or 'base'
            rtn = sub
        elif subsub is not None:
            self.cur_subsub = subsub
            rtn = subsub

        #Create empty data keys for any in the structure that are missing
        self.data.setdefault(self.cur_sec, {})
        self.data[self.cur_sec].setdefault(self.cur_subsec, {})
        self.data[self.cur_sec][self.cur_subsec].setdefault(self.cur_subsub, [])
        return rtn

    def fmtData(self, *keys):
        '''
        Format a string from the parsed code.
        @param keys: OPTIONAL section/subsection/sub-subsection names to
                     limit output to, outermost first
        @return : a string with all selected code formatted
        '''
        rtn = []
        d = self.data
        depth = 0
        for k in keys:
            d = d[k]
            rtn.append(self.fmtSectionTitle(k, depth))
            depth += 1

        return "\n".join(rtn + self._fmtSection(d, depth))

    def _fmtSection(self, data, depth=0):
        '''
        Recursive helper used by fmtData to format each section correctly.
        @param data : piece of parsed code (dict of sections or list of lines)
        @param depth: OPTIONAL section depth
        @return : a list of formatted strings
        '''
        rtn = []
        if hasattr(data, 'items'):
            for name, sec in data.items():
                if name != 'base':
                    rtn.append(self.fmtSectionTitle(name, depth))
                rtn += self._fmtSection(sec, depth=depth + 1)
        else:
            rtn += data
        return rtn

    def fmtSectionTitle(self, title, depth=0):
        '''
        Format a section title for output. This controls how all section
        headings appear.
        @param title: section name
        @param depth: OPTIONAL section depth
        @return : string with the title formatted as a C comment
        '''
        leader = '*' * depth
        self.wrapper.initial_indent = '/*' + leader + ' '
        self.wrapper.subsequent_indent = ' *' + leader + ' '

        #Long runs of "Values for X Values for Y ..." read better as a list
        parts = title.split("Values for ")  #Renamed: don't shadow builtin 'list'
        if len(parts) > 2:
            txt = parts[0] + "Values for " + ", ".join(parts[1:-1]) + \
                  " and " + parts[-1]
            txt = txt.replace(" , ", ", ")
        else:
            txt = title

        return self.wrapper.fill(txt) + '\n' + ' ' + leader + '*/'
+
+
+
class Usage(Exception):
    '''Exception raised for command line argument errors.'''

    def __init__(self, msg):
        #Pass msg through to Exception so args/str/pickling behave normally
        super(Usage, self).__init__(msg)
        self.msg = msg
+
+#Derived from http://www.artima.com/weblogs/viewpost.jsp?thread=4829
def main(argv=None):
    '''
    Parse the command line and generate the SWIG interface files.

    Derived from http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    @param argv: OPTIONAL argument vector (defaults to sys.argv)
    @return: 0 after --help, 2 on a usage error, None on success
    '''
    import getopt
    if argv is None:
        argv = sys.argv

    try:  #Usage trap, so all usage errors come to same exit point
        try:
            opts, args = getopt.getopt(argv[1:], "hf", ["help", "force"])
        except getopt.error as msg:  #BUG FIX: Python 3 compatible 'as' syntax
            raise Usage(msg)

        # process options
        force = True  #TODO: default to False once clobbering is safe
        for o, a in opts:
            if o in ("-h", "--help"):
                print(__doc__)
                return 0
            if o in ("-f", "--force"):
                force = True

        # process arguments: first positional is the header path, if any
        path = args[0] if args else None

        #Parse code
        code = CodeParser(path)

        #Output results
        usedKeys = buildConstFile(code, force)
        buildMainFile(code, usedKeys, force)
        #NOTE: removed leftover debug/benchmark code that printed fmtData()
        #and re-parsed the whole header 50 times with timeit on every run

    except Usage as err:
        sys.stderr.write("%s\n" % err.msg)
        sys.stderr.write("for help use --help\n")
        return 2
+
if __name__ == "__main__":
    #Propagate main()'s return value (2 on usage error) as the exit code;
    #the original discarded it
    sys.exit(main())
+
=======================================
--- /trunk/nidaqmx/nidaqmx_template.i Sat Sep 5 14:55:38 2009
+++ /trunk/nidaqmx/nidaqmx_template.i Sat Oct 24 14:41:48 2009
@@ -78,7 +78,38 @@
%typemap(freearg) uInt8 writeArray[] {
if ($1) free($1);
}
-//TODO: Handle other data types
+
/* Copy all three typemap modes (in, argout, and freearg) to the other
 * integer types */
%apply uInt8 writeArray[] { int16 writeArray[] };
%apply uInt8 writeArray[] { int32 writeArray[] };
%apply uInt8 writeArray[] { uInt16 writeArray[] };
%apply uInt8 writeArray[] { uInt32 writeArray[] };

/* Repeat for float64: convert a Python list of floats into a freshly
 * malloc'd float64 array; the matching freearg typemap releases it. */
%typemap(in) float64 writeArray[] {
    Py_ssize_t sz, i;
    $*1_type *lst_ptr;

    sz = PyList_Size($input);
    lst_ptr = $1 = ($*1_type *) malloc(sizeof($*1_type)*sz);
    if ($1 == NULL) {
        PyErr_NoMemory();
        SWIG_fail;
    }

    for (i = 0; i < sz; i++, lst_ptr++) {
        PyObject *s = PyList_GetItem($input, i);
        if (!PyFloat_Check(s)) {
            /* BUG FIX: clear $1 and use SWIG_fail instead of a bare
             * "return NULL", so SWIG's fail: cleanup runs for the other
             * arguments without double-freeing this one (freearg below
             * guards on $1). */
            free($1);
            $1 = NULL;
            PyErr_SetString(PyExc_ValueError, "All list items must be Floats");
            SWIG_fail;
        }
        *lst_ptr = PyFloat_AsDouble(s);
    }
}
%typemap(freearg) float64 writeArray[] {
    if ($1) free($1);
}

//Do not require sampsPerChanWritten as an input
%typemap(in, numinputs=0) int32 *sampsPerChanWritten ($*1_type temp) {
@@ -117,6 +148,8 @@
for(i=0; i < sz; i++) {
PyList_SetItem($result, i, PyFloat_FromDouble($1[i]));
}
+ //TODO: I wish I could use PyList_FromReturn() instead, but need
+ // task handle or channel count
}
%typemap(argout) (int16 readArray[], uInt32 arraySizeInSamps) {
$2_type sz, i;
@@ -132,7 +165,6 @@
%typemap(freearg) (float64 readArray[], uInt32 arraySizeInSamps) {
free(($*1_type *) $1);
}
-
%typemap(freearg) (int16 readArray[], uInt32 arraySizeInSamps) = (float64
readArray[], uInt32 arraySizeInSamps);

/* Copy all three typemap modes (in,argout, and freearg) the other integer
types */
@@ -147,6 +179,7 @@
}
%typemap(argout) float64 *value { $result = PyFloat_FromDouble(*$1);}
%typemap(argout) uInt32 *value { $result = PyInt_FromLong(*$1); }
+

/**********************************************************************
* EveryN callback issues
@@ -281,7 +314,7 @@
float64 *readArray; //Will hold the raw data read from DAQ
PyObject *dataOut; // Python list made from readArray
int32 read, err; // Samples/Chan read and return error
code
- uInt32 i; // Loop Index
+ uInt32 chCnt,i; // Channel Count, Loop Index
PyObject *func, *arglist; // Python callback and argument list
PyObject *result; // Return value from Python callback

@@ -295,20 +328,26 @@
SWIG_exception(SWIG_ValueError, "ReadAnalogF64_cb only works
for input tasks");
}

+ //Determine how many channels there are in the task
+ DAQmxGetReadNumChans(taskHandle, &chCnt);
+
//Make space for data and fill the space from the DAQ buffer
- readArray = (float64 *) malloc(sizeof(float64)*nSamples);
+ readArray = (float64 *) malloc(sizeof(float64)*nSamples*chCnt);
if (readArray == NULL) {
PyErr_NoMemory();
err = DAQmxErrorRuntimeAborting_Routing;
SWIG_fail;
}
err = DAQmxReadAnalogF64(taskHandle,DAQmx_Val_Auto,10.0,
- DAQmx_Val_GroupByChannel,readArray,nSamples,&read,NULL);
+ DAQmx_Val_GroupByChannel,readArray,nSamples*chCnt,&read,NULL);
if ( DAQmxFailed(err) ) { RaiseError(); SWIG_fail; }

//Convert read data to a list
- dataOut = PyList_New(nSamples);
- for(i=0; i < nSamples; i++) {
+ //NOTE: Use read, because it tells how much was actually read,
+ // So we can omit empty buffer space
+ //TODO: Use PyList_FromReturn() instead
+ dataOut = PyList_New(read*chCnt);
+ for(i=0; i < read*chCnt; i++) {
if ( PyList_SetItem(dataOut, i,
PyFloat_FromDouble(readArray[i])) != 0) {
if( PyErr_Occurred() != NULL) {
SWIG_fail; //Handle as regular Python error
@@ -319,7 +358,7 @@
}
}
}
-
+
//Call the Python function
func = (PyObject *) python_cb;
if (!PyCallable_Check(func)) {
@@ -355,6 +394,48 @@
return err;
}
//TODO: Build a digital corollary to ReadDigitalU32_cb
+
/* Take the return from DAQmxReadAnalogF64() and segment it into
 * Python lists. Note this will separate channel data into separate lists.
 * @param readArray: Pointer to array of data read by DAQmxReadAnalogF64
 * @param sampsPerChanRead: Value of same name returned by DAQmxReadAnalogF64
 * @param chCnt: Number of channels in this task (assumed >= 1 -- TODO
 *               confirm DAQmxGetReadNumChans never reports 0 here)
 * @return: If chCnt > 1: [[ch1 Data], [ch2 Data], etc.]
 *          If chCnt == 1: [ch1 Data]
 *          If error: NULL
 */
PyObject *PyList_FromReturn(float64 *readArray,
                            int32 sampsPerChanRead, uInt32 chCnt) {
    PyObject *dataOut, *lst = NULL;
    float64 *arrPnt = readArray;  /* Walks the flat input array */
    uInt32 ch;
    int32 i;

    dataOut = PyList_New(chCnt);
    for (ch = 0; ch < chCnt; ch++) {
        lst = PyList_New(sampsPerChanRead);
        /* BUG FIX: the per-channel list index must restart at 0 for every
         * channel; the original indexed lst with the running flat sample
         * index, writing past the list bounds for every ch > 0. */
        for (i = 0; i < sampsPerChanRead; i++, arrPnt++) {
            if (PyList_SetItem(lst, i, PyFloat_FromDouble(*arrPnt)) != 0) {
                if (PyErr_Occurred() != NULL) {
                    SWIG_fail;  /* Handle as regular Python error */
                } else {
                    /* Failed without raising; report and keep going */
                    printf("Assignment error for data[%d]=%f", i,
                           (double) *arrPnt);
                    break;
                }
            }
        }
        PyList_SetItem(dataOut, ch, lst);  /* Steals the reference to lst */
    }

    if (chCnt > 1) {
        return dataOut;  /* List of lists */
    } else {
        /* BUG FIX: single-channel callers get the inner list; keep it
         * alive and drop the now-unused outer list (the original leaked
         * dataOut and returned a reference it did not own). */
        Py_INCREF(lst);
        Py_DECREF(dataOut);
        return lst;
    }
fail:  /* Error (target of SWIG_fail); release what we own */
    Py_XDECREF(dataOut);
    return NULL;
}
%}

//Make sure EveryNCallPython_cb & ReadAnalogF64_cb show up on the Python
side

Reply all
Reply to author
Forward
0 new messages