I'm really under time pressure at the moment, so briefly:
I've always written my temporary migration data to a CSV file, to check that the procedure is correct, and then loaded it into the table in a post-process step.
Below is a ctl file that shows how I did it; I hope it will be helpful.
In particular I hope to get a chance to catch my breath and come back to this part, because at the moment the "emergencies" keep multiplying :-)
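# scale.ctl -- ActiveWarehouse-ETL control file: read ana_source/DEV.csv,
# resolve the condominio foreign key, write out/scale.csv, then bulk-import
# that file into the "scale" table.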
environment ||= "development"
ActiveRecord::Base.establish_connection(environment)
require File.expand_path('../../config/environment', __FILE__)
file = File.expand_path(File.dirname(__FILE__) + '/ana_source/DEV.csv')
file2 = File.expand_path(File.dirname(__FILE__) + '/out/scale.csv')
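# columns of the source CSV, in file order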
source_fields = [
  :codice,
  ...
]
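# read the input CSV (re-enable :skip_lines to skip leading header rows)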
source :in, {
  :file => file,
  :parser => :csv #,
#  :skip_lines => 13
}, source_fields
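# keep only rows whose :location looks like "12345 City (XY)";
# returning nil from after_read discards the row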
after_read do |r|
  r[:location] =~ /\A(\d{5}) (.*) \((\w{2})\)\z/ ? r : nil
end
##  find rails id for condominio with codice
copy :codice, :condominio_id
transform(:condominio_id) do |key, value, r|
  value.gsub(/\AC(\d{3})\.\d\z/, '\\1')
end
transform :condominio_id, :foreign_key_lookup, {
  :resolver => ActiveRecordResolver.new(Condominio, :find_by_codice)
}
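# strip the "Cnnn." prefix from :codice, keeping only the final digit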
before_write do |r|
  r[:codice] = r[:codice].gsub(/\AC\d{3}\.(\d)\z/, '\\1')
  r
end
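# one timestamp shared by :created_at and :updated_at ("ora" is Italian for "now")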
ora = DateTime.now
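# write the transformed rows to out/scale.csv; the :virtual fields add a
# generated surrogate :id plus the two timestamps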
destination :out, {
  :file => file2,
  :truncate => true
},
{
  :order => [
    :id,
    :codice,
    :condominio_id,
    ...
    :created_at,
    :updated_at
  ],
  :virtual => {
    :id => :surrogate_key,
    :created_at => ora,
    :updated_at => ora
  }
}
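# bulk-load the generated CSV into the "scale" table, truncating it first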
table = "scale"
post_process :bulk_import, {
  :file => file2,
  :truncate => true,
  :columns => [
    :id,
    :codice,
    ...
    :created_at,
    :updated_at
  ],
  :field_separator => ',',
  :target => environment,
  :table => table
}
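# PostgreSQL: realign the id sequence after importing explicit ids, so the
# next INSERT does not reuse an existing id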
post_process do
  query = "SELECT setval('#{table}_id_seq', (SELECT MAX(id) FROM #{table})+1);"
  ETL::Engine.connection(environment).execute(query)
end
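For what it's worth, I run the ctl file itself with the etl command that ships with the activewarehouse-etl gem (something like "etl scale.ctl"; the file name here is just an example). And the "checking the correctness" step I mentioned is roughly this kind of sanity check, sketched here as a standalone Ruby script; the Scala model name and the relative paths are my assumptions, just mirroring the ctl file above:

# check_scale.rb -- a minimal sketch, not part of the ctl file.
# Assumes it lives next to the ctl file and that a (hypothetical)
# Scala model is mapped to the "scale" table.
require File.expand_path('../../config/environment', __FILE__)

csv_file = File.expand_path(File.dirname(__FILE__) + '/out/scale.csv')
csv_rows = File.foreach(csv_file).count  # adjust by one if the CSV has a header row
db_rows  = Scala.count

if csv_rows == db_rows
  puts "OK: #{db_rows} rows loaded"
else
  puts "MISMATCH: csv has #{csv_rows} rows, table has #{db_rows} rows"
end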