Revision: bccc3d0be248
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:41:07 2012
Log: Refactor VCF writer and add support for annotating segmental duplicat...
http://code.google.com/p/glu-genetics/source/detail?r=bccc3d0be248
Revision: 1b6dfb82960f
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:41:52 2012
Log: Minor optimization
http://code.google.com/p/glu-genetics/source/detail?r=1b6dfb82960f
Revision: a2ad14f78989
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:45:05 2012
Log: Use heappushpop instead of heapreplace to make sure the returned item is a...
http://code.google.com/p/glu-genetics/source/detail?r=a2ad14f78989
Revision: 438c215355c8
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:46:26 2012
Log: Use the new pysam set_opt method for read group processing and make th...
http://code.google.com/p/glu-genetics/source/detail?r=438c215355c8
Revision: 3a91a6f86274
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:47:09 2012
Log: Slightly refactor VCF flattening code
http://code.google.com/p/glu-genetics/source/detail?r=3a91a6f86274
==============================================================================
Revision: bccc3d0be248
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:41:07 2012
Log: Refactor VCF writer and add support for annotating segmental duplications and repeat regions.
http://code.google.com/p/glu-genetics/source/detail?r=bccc3d0be248
Modified:
/glu/lib/genedb/__init__.py
/glu/lib/genedb/queries.py
/glu/lib/seqlib/vannotator.py
/glu/lib/seqlib/vcf.py
/glu/modules/seq/annotate.py
=======================================
--- /glu/lib/genedb/__init__.py Sun Jul 31 06:16:45 2011
+++ /glu/lib/genedb/__init__.py Sun Apr 8 12:41:07 2012
@@ -12,9 +12,10 @@
import sqlite3
-DEFAULT_GENEDB = ['genedb_hg19_snp132_rtree.db']
+DEFAULT_GENEDB = ['genedb_hg19_snp135_rtree.db']
DEFAULT_PATHS = [os.path.join(os.path.dirname(__file__),'data'),
+ '/CGF/Resources/Data/genedb',
'/usr/local/share/genedb',
'/usr/share/genedb']
=======================================
--- /glu/lib/genedb/queries.py Tue Feb 21 09:22:00 2012
+++ /glu/lib/genedb/queries.py Sun Apr 8 12:41:07 2012
@@ -8,8 +8,7 @@
import re
-from glu.lib.utils import namedtuple
-
+from glu.lib.recordtype import recordtype
CANONICAL_TRANSCRIPT = 1
CANONICAL_CHROMOSOME = 1<<1
@@ -22,6 +21,11 @@
cyto_re = re.compile('(\d+|X|Y)(?:([p|q])(?:(\d+)(.\d+)?)?)?$')
+GeneRecord = recordtype('GeneRecord', 'alias symbol chrom txStart txEnd strand featureType')
+SegDupRecord = recordtype('SegDupRecord', 'chrom start stop strand other_chrom other_start other_stop matchFraction')
+RepeatRecord = recordtype('RepeatRecord', 'chrom start stop strand repeatName repeatClass repeatFamily')
+
+
def query_genes_by_name(con, gene, canonical_contig=True,
canonical_transcript=None, mapped=None):
sql = '''
SELECT a.Alias,g.symbol,g.chrom,MIN(g.txStart) as start,MAX(g.txEnd) as end,g.strand,"GENE"
@@ -65,6 +69,8 @@
if new_genes:
genes = new_genes
+ genes = [ GeneRecord(*g) for g in genes ]
+
return genes
@@ -142,7 +148,8 @@
cur = con.cursor()
cur.execute(sql, (chrom, end, start))
- return cur.fetchall()
+
+ return [ GeneRecord(*g) for g in cur.fetchall() ]
def split_cytoband(band):
@@ -353,3 +360,42 @@
cur.execute(sql,(chrom,end,start))
return cur.fetchall()
+
+
+def query_segdups(con,chrom,start,end):
+ sql = '''
+ SELECT chrom,start,stop,strand,other_chrom,other_start,other_stop,match_fraction
+ FROM SEGDUP
+ WHERE chrom = ?
+ AND id in (SELECT id
+ FROM segdup_index
+ WHERE start < ?
+ AND stop > ?)
+ ORDER BY start,stop,other_chrom,other_start,other_stop;
+ '''
+ if chrom.startswith('chr'):
+ chrom = chrom[3:]
+
+ cur = con.cursor()
+ cur.execute(sql,(chrom,end,start))
+ return [ SegDupRecord(*d) for d in cur ]
+
+
+def query_repeats(con,chrom,start,end):
+ sql = '''
+ SELECT chrom,start,stop,strand,name,class,family
+ FROM RMSK
+ WHERE id in (SELECT id
+ FROM rmsk_index
+ WHERE chrom = ?
+ AND chrom1 = ?
+ AND start < ?
+ AND stop > ?)
+ ORDER BY start,stop;
+ '''
+ if chrom.startswith('chr'):
+ chrom = chrom[3:]
+
+ cur = con.cursor()
+ cur.execute(sql,(chrom,chrom,end,start))
+ return [ RepeatRecord(*r) for r in cur ]
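For context, a minimal usage sketch of the new query helpers. The database filename and coordinates below are illustrative only (not from this commit); any genedb SQLite file with the SEGDUP/segdup_index and RMSK/rmsk_index tables would do, and the point is that the recordtype results allow attribute access rather than positional indexing:

  import sqlite3

  from glu.lib.genedb.queries import query_segdups, query_repeats

  con = sqlite3.connect('genedb_hg19_snp135_rtree.db')   # hypothetical local copy

  for s in query_segdups(con, 'chr1', 1000000, 1001000):
    print s.other_chrom, s.other_start, s.other_stop, s.matchFraction

  for r in query_repeats(con, 'chr1', 1000000, 1001000):
    print r.repeatName, r.repeatClass, r.repeatFamily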
=======================================
--- /glu/lib/seqlib/vannotator.py Tue Feb 21 06:31:32 2012
+++ /glu/lib/seqlib/vannotator.py Sun Apr 8 12:41:07 2012
@@ -25,7 +25,6 @@
from glu.lib.seqlib.intervaltree import IntervalTree
from glu.lib.genedb import open_genedb
-from glu.lib.genedb.queries import query_snps_by_location
cyto_re = re.compile('(\d+|X|Y)(?:([p|q])(?:(\d+)(.\d+)?)?)?$')
@@ -33,8 +32,9 @@
BandRecord = namedtuple('BandRecord', 'name chrom start end color')
GeneRecord = namedtuple('GeneRecord', 'id name symbol geneid mRNA protein canonical chrom strand '
- 'txStart txEnd cdsStart cdsEnd exonCount exonStarts exonEnds')
-SNPRecord = namedtuple('SNPRecord', 'name chrom start end strand refAllele alleles vclass func weight')
+ 'txStart txEnd cdsStart cdsEnd exonCount exonStarts exonEnds '
+ 'category isRefSeq startcomplete endComplete nonsenseMediatedDecay '
+ 'strangeSplice atacIntrons selenocysteine')
GeneFeature = namedtuple('GeneFeature', 'chrom start end cds_index exon_num type')
GeneEvidence = recordtype('GeneEvidence', 'chrom cytoband ref_start ref_end intersect gene details '
'func func_class func_type ref_nuc var_nuc ref_aa var_aa')
@@ -86,6 +86,9 @@
left,right = "3' UTR","5' UTR"
intron_offset = 0
+ if gene.category=='noncoding' or cds_start==cds_end:
+ left = right = 'UTR'
+
last = gene.txStart
cds_index = 0
@@ -168,65 +171,6 @@
return ''.join(ref[:match])
-def get_snps(con):
- cur = con.cursor()
- cur.execute('SELECT * FROM SNP;')
-
- make = SNPRecord._make
- strcache = {}
- allelecache = {}
- funccache = {}
-
- def intern(x,strcache=strcache.setdefault): return strcache(x,x)
-
- empty = ()
-
- for row in cur:
- # name chrom start end strand refAllele alleles vclass func weight
- row = list(row)
- row[1] = intern(row[1])
- row[4] = intern(row[4])
- row[5] = intern(row[5]) if len(row[5])<4 else row[5]
-
- alleles = allelecache.get(row[6])
- if alleles is not None:
- row[6] = alleles
- else:
- alleles = [a.replace('-','') for a in row[6].split('/')]
- if max(len(a) for a in alleles)>3:
- alleles = tuple(intern(a) if len(a)<3 else a for a in alleles)
- row[6] = alleles
- else:
- alleles = tuple(intern(a) for a in alleles)
- row[6] = allelecache[row[6]] = alleles
-
- row[7] = intern(row[7])
-
- func = funccache.get(row[8] or '')
- if func is not None:
- row[8] = func
- else:
- func = tuple(intern(f) for f in row[8].split(',')) if row[8] else empty
- row[8] = funccache[row[8]] = func
-
- row[9] = int(row[9])
-
- yield make(row)
-
-
-def get_snps_interval(con, chrom, ref_start, ref_end):
- snps = query_snps_by_location(con, chrom, ref_start, ref_end)
-
- make = SNPRecord._make
- for row in snps:
- # name chrom start end strand refAllele alleles vclass func weight
- row = list(row)
- row[4] = row[4].replace('-','')
- row[5] = set(a.replace('-','') for a in row[5].split('/'))
- row[7] = set(row[7].split(',')) if row[7] else set()
- yield make(row)
-
-
def group_evidence(orig):
key_func = itemgetter(0,1,3,4,5,6,7,8)
orig.sort(key=key_func)
@@ -347,17 +291,10 @@
parts = set(intersect)
mut_type = set()
- if gene.strand=='+':
- left,right = "5'","3'"
- else:
- left,right = "3'","5'"
-
for splice in gene_parts.find_values(ref_start-10,ref_end+10):
if splice.type=='CDS' or 'UTR' in splice.type:
- if 0<splice.start-ref_end<=10:
- mut_type.add('POSSIBLE %s INTRONIC SPLICE VARIANT' % left)
- if 0<ref_start-splice.end<=10:
- mut_type.add('POSSIBLE %s INTRONIC SPLICE VARIANT' % right)
+ if (0<splice.start-ref_end<=10) or (0<ref_start-splice.end<=10):
+ mut_type.add('POSSIBLE INTRONIC SPLICE VARIANT')
parts = ','.join(sorted(parts))
mut_type = ','.join(sorted(mut_type))
@@ -530,25 +467,3 @@
result += [True,'NON-SYNONYMOUS',mut_type,ref_cds_nuc,var_cds_nuc,str(ref_aa),str(var_aa)]
return result
-
-
- def get_dbsnp(self, chrom, ref_start, ref_end, ref_nuc, var_nuc):
- snps = list(get_snps_interval(self.con, chrom, ref_start, ref_end))
- var_comp = str(Seq(var_nuc).complement())
- var = set([var_nuc,var_comp])
-
- exact = set()
- inexact = set()
-
- for snp in snps:
- alleles = set(str(snp.alleles).split('/'))
- match = (snp.start==ref_start and snp.end==ref_end and var&alleles)
- #print snp.name,snp.start,ref_start,snp.end,ref_end,snp.refAllele,ref_nuc,var,alleles,var&alleles,match
- #print snp.name,snp.start==ref_start,snp.end==ref_end,snp.refAllele,ref_nuc,
-
- if match:
- exact.add(snp.name)
- else:
- inexact.add(snp.name)
-
- return sorted(exact), sorted(inexact)
=======================================
--- /glu/lib/seqlib/vcf.py Wed Mar 14 06:29:49 2012
+++ /glu/lib/seqlib/vcf.py Sun Apr 8 12:41:07 2012
@@ -9,9 +9,11 @@
import csv
import sys
-from collections import defaultdict
-
-from glu.lib.fileutils import table_reader
+from collections import defaultdict, OrderedDict
+
+import pysam
+
+from glu.lib.fileutils import table_reader, autofile, hyphen
from glu.lib.recordtype import recordtype
@@ -25,8 +27,7 @@
self.data = data = table_reader(filename,hyphen=sys.stdin)
- self.metadata_order = metadata_order = []
- self.metadata = metadata = defaultdict(list)
+ self.metadata = metadata = OrderedDict()
self.header = None
self.samples = None
@@ -41,7 +42,7 @@
meta = meta[2:]
if meta not in metadata:
- metadata_order.append(meta)
+ metadata[meta] = []
metadata[meta].append(row[0])
@@ -62,7 +63,7 @@
for row in self.data:
chrom = intern(row[0])
start = int(row[1])-1
- names = row[2].split(',') if row[2]!='.' else []
+ names = row[2].split(';') if row[2]!='.' else []
ref = intern(row[3])
end = start+len(ref)
var = [ intern(v) for v in row[4].split(',') ]
@@ -84,3 +85,44 @@
var = [ intern(v[1:]) for v in var ]
yield VCFRecord(chrom,start,end,names,ref,var,qual,filter,info,format,genos)
+
+
+class VCFWriter(object):
+ def __init__(self, filename, metadata, names, reference):
+ self.reference = pysam.Fastafile(reference)
+ self.out = out = autofile(hyphen(filename,sys.stdout),'w')
+
+ for meta in metadata:
+ for m in metadata[meta]:
+ out.write(m)
+ out.write('\n')
+
+ out.write('\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT']+names))
+ out.write('\n')
+
+ def write_locus(self, vcfvar):
+ # FORMAT: chrom start end names ref var filter info format genos
+ pos = vcfvar.start+1
+ ref = vcfvar.ref
+ var = vcfvar.var
+
+ # VCF codes indels with an extra left reference base
+ if not vcfvar.ref or '' in vcfvar.var:
+ pos -= 1
+ r = self.reference.fetch(vcfvar.chrom,pos-1,pos).upper()
+ ref = r+ref
+ var = [ r+a for a in var ]
+
+ row = [ vcfvar.chrom,
+ str(pos),
+ ';'.join(vcfvar.names) or '.',
+ ref,
+ ','.join(var),
+ vcfvar.qual,
+ ';'.join(sorted(vcfvar.filter)) or '.',
+ ';'.join(vcfvar.info),
+ vcfvar.format ] + [ ':'.join(g) for g in vcfvar.genos ]
+
+ out = self.out
+ out.write('\t'.join(map(str,row)))
+ out.write('\n')
=======================================
--- /glu/modules/seq/annotate.py Wed Mar 14 06:29:49 2012
+++ /glu/modules/seq/annotate.py Sun Apr 8 12:41:07 2012
@@ -16,8 +16,9 @@
from glu.lib.fileutils import list_reader, autofile, hyphen
from glu.lib.progressbar import progress_loop
-
-from glu.lib.seqlib.vcf import VCFReader
+from glu.lib.genedb.queries import query_segdups, query_repeats
+
+from glu.lib.seqlib.vcf import VCFReader, VCFWriter
from glu.lib.seqlib.cga import cga_reader
from glu.lib.seqlib.vannotator import VariantAnnotator
from glu.lib.seqlib.cgfvariants import CGFVariants
@@ -126,6 +127,12 @@
for e in nsevidence )
nsinfo = list(unique(nsinfo))
+ segdups = query_segdups(vs.con, v.chrom, v.start, v.end)
+ repeats = query_repeats(vs.con, v.chrom, v.start, v.end)
+
+ while 'PASS' in v.filter:
+ v.filter.remove('PASS')
+
if not genes:
v.filter.append('Intergenic')
@@ -150,6 +157,20 @@
if nsinfo:
new_info.append('GENE_FUNCTION_DETAILS=%s' % (','.join(nsinfo )))
+ if segdups:
+ segdup_info = ('%s:%s-%s:%.2f' % (s.other_chrom,s.other_start,s.other_stop,s.matchFraction*100) for s in segdups)
+ segdup_info = ','.join(segdup_info)
+ v.filter.append('SegDup')
+ new_info.append('SEGDUP_COUNT=%d' % len(segdups))
+ new_info.append('SEGDUP_INFO=%s' % segdup_info)
+
+ if repeats:
+ repeat_info = ('%s:%s:%s' % (r.repeatName,r.repeatClass,r.repeatFamily) for r in repeats)
+ repeat_info = ','.join(repeat_info)
+ v.filter.append('Repeat')
+ new_info.append('REPEAT_COUNT=%d' % len(repeats))
+ new_info.append('REPEAT_INFO=%s' % repeat_info)
+
if cv:
cvinfo = cv.score_and_classify(v.chrom,v.start,v.end,[v.ref,v.var[0]])
if cvinfo.exact_vars:
@@ -216,8 +237,7 @@
new_info_fields = set(f.split('=',1)[0] for f in new_info)
v.info = new_info+[ f for f in v.info if f.split('=',1)[0] not in new_info_fields ]
- if len(v.filter)>1 and 'PASS' in v.filter:
- v.filter.remove('PASS')
+ v.filter = v.filter or ['PASS']
return v
@@ -226,7 +246,6 @@
vs = VariantAnnotator(options.genedb, options.reference)
vcf = VCFReader(options.variants,sys.stdin)
cv = CGFVariants(options.cgfvariants, options.reference) if options.cgfvariants else None
- out = autofile(hyphen(options.output,sys.stdout),'w')
if options.kaviar:
references = list_reader(options.reference+'.fai')
@@ -238,10 +257,16 @@
refvars = ReferenceVariants(options.refvars,options.refingroup) if options.refvars else None
metadata = vcf.metadata
+
+ metadata.setdefault('FILTER',[])
+ metadata.setdefault('INFO',[])
+
metadata['FILTER'].append('##FILTER=<ID=PartiallyCalled,Description="Variant is not called for one or more samples">')
metadata['FILTER'].append('##FILTER=<ID=Indel,Description="Variant is an insertion or deletion">')
metadata['FILTER'].append('##FILTER=<ID=Intergenic,Description="Variant not in or near a gene">')
metadata['FILTER'].append('##FILTER=<ID=NPF,Description="Variant is not predicted to alter a protein">')
+ metadata['FILTER'].append('##FILTER=<ID=SegDup,Description="Variant occurs in a segmentally duplicated region">')
+ metadata['FILTER'].append('##FILTER=<ID=Repeat,Description="Variant occurs in a repetitive or low-complexity region">')
if cv:
metadata['FILTER'].append('##FILTER=<ID=Common,Description="Variant is likely common with common score>%f">' % options.commonscore)
@@ -263,6 +288,10 @@
metadata['INFO'].append('##INFO=<ID=GENE_LOCATION,Number=.,Type=String,Description="Location of variant in gene(s)">')
metadata['INFO'].append('##INFO=<ID=GENE_FUNCTION,Number=.,Type=String,Description="Functional classification of variant for each gene and transcript">')
metadata['INFO'].append('##INFO=<ID=GENE_FUNCTION_DETAILS,Number=.,Type=String,Description="Functional details of variant for each gene and transcript">')
+ metadata['INFO'].append('##INFO=<ID=SEGDUP_COUNT,Number=1,Type=Integer,Description="Number of segmental duplications that overlap variant locus">')
+ metadata['INFO'].append('##INFO=<ID=SEGDUP_INFO,Number=.,Type=String,Description="Details of segmental duplications that overlap variant locus">')
+ metadata['INFO'].append('##INFO=<ID=REPEAT_COUNT,Number=1,Type=Integer,Description="Number of repetitive or low complexity elements that overlap variant locus">')
+ metadata['INFO'].append('##INFO=<ID=REPEAT_INFO,Number=.,Type=String,Description="Details of repetitive or low complexity elements that overlap variant locus">')
if cv:
metadata['INFO'].append('##INFO=<ID=COMMON_SCORE,Number=1,Type=Float,Description="Common score: maximum allele frequency in any population for rarest allele">')
@@ -279,34 +308,12 @@
metadata['INFO'].append('##INFO=<ID=REFVAR_INGROUP_NAMES,Number=.,Type=String,Description="Intra-group samples in which Variant is present">')
metadata['INFO'].append('##INFO=<ID=REFVAR_OUTGROUP_NAMES,Number=.,Type=String,Description="Extra-group samples in which Variant is present">')
- for meta in vcf.metadata_order:
- for m in metadata[meta]:
- out.write(m)
- out.write('\n')
-
- out.write('#%s\n' % ('\t'.join(vcf.header)))
+ out = VCFWriter(options.output, metadata, vcf.samples, options.reference)
for v in vcf:
update_vcf_annotation(v, vs, cv, kaviar, refvars, options)
- # FORMAT: chrom start end names ref var filter info format genos
- pos = v.start+1
- ref = v.ref
- var = v.var
-
- # VCF codes indels with an extra left reference base
- if not v.ref or '' in v.var:
- pos -= 1
- r = vs.reference.fetch(v.chrom,pos-1,pos).upper()
- ref = r+ref
- var = [ r+a for a in var ]
-
- row = [ v.chrom, str(pos), ','.join(v.names) or '.', ref, ','.join(var), v.qual,
- ';'.join(sorted(v.filter)) or '.', ';'.join(v.info), v.format ] + [ ':'.join(g) for g in v.genos ]
-
- out.write('\t'.join(row))
- out.write('\n')
+ out.write_locus(v)
def valid_allele(a):
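To illustrate the indel convention now handled inside VCFWriter.write_locus: VCF encodes insertions and deletions with one extra reference base on the left, so when the reference or any alternate allele is empty the position is shifted back by one and that base is prepended. A standalone sketch of the same arithmetic (fetch_base is a stand-in for pysam's Fastafile.fetch and is not part of the commit):

  def pad_indel(chrom, start, ref, alts, fetch_base):
    # start is 0-based; VCF POS is 1-based
    pos = start + 1
    if not ref or '' in alts:
      pos -= 1
      base = fetch_base(chrom, pos - 1)        # reference base at 0-based pos-1
      ref  = base + ref
      alts = [ base + a for a in alts ]
    return pos, ref, alts

  # A deletion of 'AC' at 0-based start 100 (empty alternate allele), with a
  # reference base of 'T' immediately to the left, becomes POS=100 REF=TAC ALT=T:
  #   pad_indel('chr1', 100, 'AC', [''], fetch_base) -> (100, 'TAC', ['T'])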
==============================================================================
Revision: 1b6dfb82960f
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:41:52 2012
Log: Minor optimization
http://code.google.com/p/glu-genetics/source/detail?r=1b6dfb82960f
Modified:
/glu/modules/seq/merge_var.py
=======================================
--- /glu/modules/seq/merge_var.py Thu Dec 29 11:11:15 2011
+++ /glu/modules/seq/merge_var.py Sun Apr 8 12:41:52 2012
@@ -84,13 +84,13 @@
alleles.append(a1)
allelemap[a1] = len(alleles)
function.append(function_records(var.allele1Gene))
- xrefs.append(var.allele1XRef if var.allele1XRef else '')
+ xrefs.append(var.allele1XRef or '')
if a2 not in allelemap:
alleles.append(a2)
allelemap[a2] = len(alleles)
function.append(function_records(var.allele2Gene))
- xrefs.append(var.allele2XRef if var.allele2XRef else '')
+ xrefs.append(var.allele2XRef or '')
individuals.append(var.individual)
genotypes.append('%s/%s' % (allelemap[a1],allelemap[a2]))
@@ -126,13 +126,13 @@
alleles.append(a1)
allelemap[a1] = len(alleles)
function.append(function_records(var.allele1Gene))
- xrefs.append(var.allele1XRef if var.allele1XRef else '')
+ xrefs.append(var.allele1XRef or '')
if a2 not in allelemap:
alleles.append(a2)
allelemap[a2] = len(alleles)
function.append(function_records(var.allele2Gene))
- xrefs.append(var.allele2XRef if var.allele2XRef else '')
+ xrefs.append(var.allele2XRef or '')
genotypes[ indmap[var.individual] ] = '%s/%s' % (allelemap[a1],allelemap[a2])
@@ -194,7 +194,6 @@
out.writerow(row)
-
if __name__=='__main__':
if 1:
main()
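The rewritten expressions are behaviour-preserving here: 'x or fallback' and 'x if x else fallback' both test the truth value of x, and the XRef fields fall back to the empty string either way. A quick sanity check of the idiom:

  # the two spellings agree for every value, truthy or falsy
  for xref in ('rs123', '', None):
    assert (xref if xref else '') == (xref or '')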
==============================================================================
Revision: a2ad14f78989
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:45:05 2012
Log: Use heappushpop instead of heapreplace to make sure the returned item is always the smallest. This simplifies the subsequent code and strengthens the heap invariant.
http://code.google.com/p/glu-genetics/source/detail?r=a2ad14f78989
Modified:
/glu/lib/utils.py
=======================================
--- /glu/lib/utils.py Wed Feb 1 08:13:41 2012
+++ /glu/lib/utils.py Sun Apr 8 12:45:05 2012
@@ -547,7 +547,7 @@
'''
Internal function. See sort_almost_sorted
'''
- from heapq import heapify, heapreplace, heappop
+ from heapq import heapify, heappushpop, heappop
# STAGE 1: Fill initial window and heapify
it = iter(sequence)
@@ -562,10 +562,11 @@
# STAGE 2: Slide window until end of sequence
last = heap[0]
- # Loop invariant:
+ # Loop invariants:
# len(heap)==c, where c is a constant 0 < c <= windowsize
+ # all(e>=last for e in heap)
for item in it:
- item = heapreplace(heap, item)
+ item = heappushpop(heap, item)
if item<last:
raise OrderError('Misordered keys beyond window size')
@@ -574,43 +575,32 @@
yield item
- # STAGE 3: Check transition from sliding window phase to
- # draining window phase
-
- # Invariant: len(heap)>0
- item = heappop(heap)
-
- # Must check, since the newly pushed item could still be less than last
- if item<last:
- raise OrderError('Misordered keys beyond window size')
-
- last = item
-
- yield item
-
- # STAGE 4: Drain window, no need to check sort order of remaining elements
+ # Release reference to last item, since it is no longer needed
+ del last
+
+ # STAGE 3: Drain window, no need to check sort order of remaining elements
# since remaining elements must be within windowsize distance
while heap:
yield heappop(heap)
-def check_sorted(sequence,key=None):
+def check_sorted_iter(sequence,key=None):
'''
- check_sorted(sequence,key=None) -> sequence
+ check_sorted_iter(sequence,key=None) -> sequence
Returns a generator that yields all elements of sequence, provided that
the elements are sorted in non-descending order. Otherwise an OrderError
exception is raised.
- >>> list(check_sorted([1,2,3]))
+ >>> list(check_sorted_iter([1,2,3]))
[1, 2, 3]
- >>> list(check_sorted([3,2,1]))
+ >>> list(check_sorted_iter([3,2,1]))
Traceback (most recent call last):
...
OrderError: Invalid sort order
- >>> list(check_sorted([1.7,1.5,1.6,1.4], key=lambda x: int(x)))
+ >>> list(check_sorted_iter([1.7,1.5,1.6,1.4], key=lambda x: int(x)))
[1.7, 1.5, 1.6, 1.4]
'''
it = iter(sequence)
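The behavioural difference behind this change: heapreplace pops the current minimum before pushing the new item, so it can hand back a value larger than the item just pushed, while heappushpop pushes first and therefore always returns the smallest of the heap plus the new item. A small demonstration:

  from heapq import heapify, heapreplace, heappushpop

  h1 = [5, 7, 9]; heapify(h1)
  h2 = [5, 7, 9]; heapify(h2)

  print heapreplace(h1, 1)   # -> 5: old minimum returned, even though 1 is smaller
  print heappushpop(h2, 1)   # -> 1: smallest of heap + new item; heap left unchanged

With heappushpop the yielded item is never larger than anything remaining in the heap, which is exactly the strengthened loop invariant noted in the diff and what allows the old transition stage to be dropped.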
==============================================================================
Revision: 438c215355c8
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:46:26 2012
Log: Use the new pysam set_opt method for read group processing and make the progress bar selectable via a command line option.
http://code.google.com/p/glu-genetics/source/detail?r=438c215355c8
Modified:
/glu/modules/seq/filter.py
=======================================
--- /glu/modules/seq/filter.py Tue Feb 21 06:32:18 2012
+++ /glu/modules/seq/filter.py Sun Apr 8 12:46:26 2012
@@ -41,40 +41,8 @@
# Return a nested generator, since header update must occur immediately
def _set_readgroup():
- rg = ('RG',groupname)
-
for align in aligns:
- tags = align.tags
-
- if tags:
- align.tags = tags = [ (k,v) for k,v in tags if k[0]!='X' ]
-
- if tags is None:
- align.tags = [rg]
- elif not tags:
- align.tags.append(rg)
- else:
- for i,(name,value) in enumerate(tags):
- if name=='RG':
- tags[i] = rg
- break
- else:
- tags.append(rg)
-
- align.tags = tags
-
- #tags = align.tags or []
- #
- #try:
- # names,values = zip(*tags)
- # idx = names.index('RG')
- # tags[idx] = rg
- #
- #except ValueError:
- # tags.append(rg)
- #
- #align.tags = tags
-
+ align.set_opt('RG',groupname)
yield align
return _set_readgroup()
@@ -587,6 +555,8 @@
help='Summary output file')
parser.add_argument('--contigstats', metavar='FILE',
help='Contig statistics')
+ parser.add_argument('-P', '--progress', action='store_true',
+ help='Show analysis progress bar, if possible')
return parser
@@ -632,7 +602,9 @@
controls = set(rmap.get(c) for c in set(list_reader(options.controls)))
controls.discard(None)
- #aligns = progress_loop(aligns, label='Loading BAM file(s): ', units='alignments')
+ if options.progress:
+ aligns = progress_loop(aligns, label='Loading BAM file(s): ', units='alignments')
+
aligns = filter_alignments(aligns, options.includealign, options.excludealign)
stats = FilterStats()
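A minimal sketch of the per-alignment read-group tagging this hunk switches to. The file names are placeholders; set_opt is the spelling used by the pysam version this commit targets, while newer pysam releases expose the same operation as set_tag. The @RG header line still has to be updated separately, as the surrounding code does:

  import pysam

  inbam  = pysam.Samfile('input.bam',  'rb')
  outbam = pysam.Samfile('tagged.bam', 'wb', template=inbam)

  for align in inbam:
    align.set_opt('RG', 'MyReadGroup')    # per-read RG tag (set_tag in newer pysam)
    outbam.write(align)

  outbam.close()
  inbam.close()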
==============================================================================
Revision: 3a91a6f86274
Author: Kevin B. Jacobs <jac...@bioinformed.com>
Date: Sun Apr 8 12:47:09 2012
Log: Slightly refactor VCF flattening code
http://code.google.com/p/glu-genetics/source/detail?r=3a91a6f86274
Modified:
/glu/modules/seq/vcf2table.py
=======================================
--- /glu/modules/seq/vcf2table.py Thu Dec 29 11:07:06 2011
+++ /glu/modules/seq/vcf2table.py Sun Apr 8 12:47:09 2012
@@ -25,9 +25,7 @@
from glu.lib.seqlib.refvariants import ReferenceVariants
-def flatten_vcf(options):
- vcf = VCFReader(options.variants,sys.stdin)
-
+def flatten_vcf(vcf,records=None):
filters = []
info = []
samples = vcf.samples or []
@@ -58,10 +56,12 @@
+ samples
+ [ '%s_depth' % s for s in samples ] )
- out = table_writer(options.output,hyphen=sys.stdout)
- out.writerow(header)
-
- for v in vcf:
+ yield header
+
+ if records is None:
+ records = iter(vcf)
+
+ for v in records:
# FORMAT: chrom start end names ref var filter info format genos
infomap = {}
@@ -79,7 +79,7 @@
+ [ g[0] for g in v.genos ]
+ [ '/'.join(g[1].split(',')) if len(g)>1 else '' for g in v.genos ] )
- out.writerow(row)
+ yield row
def option_parser():
@@ -98,4 +98,8 @@
parser = option_parser()
options = parser.parse_args()
- flatten_vcf(options)
+ vcf = VCFReader(options.variants,sys.stdin)
+ results = flatten_vcf(vcf)
+
+ out = table_writer(options.output,hyphen=sys.stdout)
+ out.writerows(results)
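With this refactor flatten_vcf no longer opens any files itself: it takes a VCFReader (and, optionally, a pre-filtered record iterator) and yields the header row followed by one flattened row per record, leaving the caller to decide where the rows go. A usage sketch; the file names are illustrative and the import paths are assumed to follow the module's own layout:

  import sys

  from glu.lib.fileutils         import table_writer
  from glu.lib.seqlib.vcf        import VCFReader
  from glu.modules.seq.vcf2table import flatten_vcf

  vcf     = VCFReader('calls.vcf', sys.stdin)
  passing = (v for v in vcf if 'PASS' in v.filter)   # records= lets callers pre-filter

  out = table_writer('calls_pass.tsv', hyphen=sys.stdout)
  out.writerows(flatten_vcf(vcf, records=passing))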