self.my_pid = this_task.last_doer_pid new_doer = { 'doer_name': self.get_doer_name(this_task.id), 'task_queue': this_task.id, 'method_name': this_task.method_name, 'pid': self.my_pid } this_doer_id = self.dbase.task_doer.insert(**self.dbase.task_doer._filter_fields(new_doer)) this_doer = self.dbase.task_doer[this_doer_id] this_task.update_record(status=RUNNING, last_doer_id=this_doer.id, doer_name=new_doer['doer_name']) self.dbase.commit()
<snip socket handling> <snip method name getting>
if not run_method: this_doer.update_record(status=FAILED, status_note='Method %s not implemented' % this_task.method_name) self.dbase.commit() raise Exception('Method %s not implemented' % this_task.method_name)
passed_args = passed_vars = None # close off the connection so it doesn't drop out during the processing self.dbase.commit() try: run_method( *loads(this_task.args, object_hook=self._decode_dict), **loads(this_task.vars, object_hook=self._decode_dict)) #, *passed_vars) except Exception as ee: this_doer.update_record(status=FAILED, status_note='Failed: ' + str(ee)) debug_log(sys._getframe().f_code.co_name, None, start=method_start, error_details=ee, severity='Fail' ) raise Exception('Failure during method execution: ' + ee)
print 'About to save status' this_doer.update_record(status=COMPLETED, status_note='Normal exit') self.dbase.commit()
print 'Clean exit for doer ' + str(this_doer.pid) self.dbase.commit() self.dbase._adapter.close(action=None) try:
# now the long running task - in the test it's a sleep for 305 seconds. run_method( *loads(this_task.args, object_hook=self._decode_dict), **loads(this_task.vars, object_hook=self._decode_dict)) #, *passed_vars) except Exception as ee:
self.dbase._adapter.reconnect()
this_doer = change_status( this_doer, FAILED, 'Failed: ' + str(ee) ) this_doer.update_record() debug_log(sys._getframe().f_code.co_name, None, start=method_start, error_details=ee, severity='Fail') raise Exception('Failure during method execution: ' + str(ee))
self.dbase._adapter.reconnect() this_doer = change_status( this_doer, COMPLETED, 'Normal exit' )--
Resources:
- http://web2py.com
- http://web2py.com/book (Documentation)
- http://github.com/web2py/web2py (Source code)
- https://code.google.com/p/web2py/issues/list (Report Issues)
---
You received this message because you are subscribed to a topic in the Google Groups "web2py-users" group.
To unsubscribe from this topic, visit https://groups.google.com/d/topic/web2py/U-hkmIc3TZM/unsubscribe.
To unsubscribe from this group and all its topics, send an email to web2py+un...@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.
db._adapter.close()
# call long running process
db._adapter.reconnect()

Ian, as mentioned before, with the latest trunk a reconnect can be done with the following:

- db._adapter.close()
- db._adapter.reconnect()

Without the latest trunk, try the following:

try:
    db._adapter.close()
except:
    db._adapter.connection = None
db._adapter.reconnect()

But my suggestion is to use the latest trunk.
Paolo
2015-03-17 11:02 GMT+01:00 Ian Ryder <i.r...@appichar.com.au>:
The issue sits with the parent method - it calls a method which takes > 300 seconds, then whatever action is taken with the database in the parent method on return blows up. I think I've cracked it though - and to get there I had to drop the web2py scheduler and write my own so I knew what was going on! Anyway, for anyone else in the same situation (unless web2py handles this natively down the track), I basically have to reconnect completely, i.e.:
- db = DAL('mysql://XXXXXXXXXX@mysql.server/xxxxxxxxxxx', fake_migrate=False, pool_size=10)
In the Scheduler.py file?
For some reason I'm still getting the lost connection error. I'm putting the close() and reconnect() in Scheduler.py and they surround the long processes.