#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import unique
import export
import ExpressionBuilder
import copy
import traceback

try:
    import warnings
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
        from stats_scripts import statistics
        import math
        from scipy import stats
        use_scipy = True
except Exception:
    use_scipy = False ### scipy is not required, but is used as a faster implementation of the Fisher Exact Test when present

def filepath(filename):
    fn = unique.filepath(filename)
    return fn

def read_directory(sub_dir):
    dir_list = unique.read_directory(sub_dir); dir_list2 = []
    ### Prevent folder names from being included
    for entry in dir_list:
        if entry[-4:] == ".txt" or entry[-4:] == ".tab" or entry[-4:] == ".csv" or '.fa' in entry: dir_list2.append(entry)
    return dir_list2

class GrabFiles:
    def setdirectory(self,value):
        self.data = value
    def display(self):
        print self.data
    def searchdirectory(self,search_term):
        ### self is an instance while self.data is the value of the instance
        file_dirs = getDirectoryFiles(self.data,str(search_term))
        if len(file_dirs)<1: print search_term,'not found',self.data
        return file_dirs

def getDirectoryFiles(import_dir, search_term):
    exact_file = ''; exact_file_dirs=[]
    dir_list = read_directory(import_dir) ### identify all files in the sub-directory
    for data in dir_list: ### loop through each file in the directory to output results
        affy_data_dir = import_dir[1:]+'/'+data
        if search_term in affy_data_dir: exact_file_dirs.append(affy_data_dir)
    return exact_file_dirs

########## End generic file import ##########

def cleanUpLine(line):
    line = string.replace(line,'\n','')
    line = string.replace(line,'\c','')
    data = string.replace(line,'\r','')
    data = string.replace(data,'"','')
    return data
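
### Illustrative example (added comment; not in the original source):
### cleanUpLine('ENSG00000139618\t"BRCA2"\r\n') returns 'ENSG00000139618\tBRCA2'
### (newlines, carriage returns, and double quotes are stripped).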

def createCorrelationTemplate(tissues):
    i=0; tissue_template_db={}
    null_template = [0]*len(tissues)
    for tissue in tissues:
        tissue_template = list(null_template) ### creates a modifiable copy
        tissue_template[i] = correlationDirection ### set to -1 for a reversed (anti-correlated) profile
        #if 'Atrium' in tissue:
        tissue_template_db[tissue] = tissue_template
        i+=1
    return tissue_template_db
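
### Illustrative example (added comment): with the module-level correlationDirection
### set to 1.00, createCorrelationTemplate(['Heart','Liver','Brain']) returns the
### idealized profiles {'Heart': [1.0,0,0], 'Liver': [0,1.0,0], 'Brain': [0,0,1.0]}
### that each gene's expression vector is correlated against.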

def findHousekeepingGenes(uid,data_list1):
    ### Pearson won't work when there is no variance, so an alternative approach is needed
    stdev = statistics.stdev(data_list1)
    housekeeping.append([stdev,uid]) #stdev/min(data_list1)

def expressedIndexes(values):
    filtered_indexes=[]
    i=0
    for value in values:
        if value!=None:
            filtered_indexes.append(i)
        i+=1
    return filtered_indexes
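
### Illustrative example (added comment): expressedIndexes([1.2, None, 0.8]) returns
### [0, 2] - the positions of the non-missing values.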

def advancedPearsonCorrelationAnalysis(uid,data_list1,tissue_template_db):
    expIndexes = expressedIndexes(data_list1)
    Queried[uid]=[]
    if (float(len(expIndexes))/len(data_list1))>0.0: ### Proceed if any evaluated samples express the gene (a 50% cutoff would use >0.5)
        data_list = map(lambda i: data_list1[i],expIndexes) ### Only expressed values (non-None)
        max_diff = max(data_list)-statistics.avg(data_list)
        if max_diff>-1000 and max(data_list)>-1000:
            if correlateAllGenes:
                min_rho = -1
                min_p = 1
            else:
                min_rho = 0.3
                min_p = 0.05
            for tissue in tissue_template_db:
                tissue_template = tissue_template_db[tissue]
                c1 = tissue_template.count(1)
                filtered_template = map(lambda i: tissue_template[i],expIndexes)
                c2 = filtered_template.count(1)
                if len(data_list)!= len(filtered_template): kill ### undefined name - intentionally crashes on a length mismatch
                if c1 == c2 or c1 != c2: ### Always True - the check that the number of 1s in list1 matches list2 is effectively disabled
                    rho,p = rhoCalculation(data_list,filtered_template)
                    if tissue == 'Housekeeping':
                        print tissue, p, uid;sys.exit()
                    #if rho>min_rho:
                    if p<min_p and rho>min_rho:
                        Added[uid]=[]
                        try: tissue_scores[tissue].append([(rho,p),uid])
                        except Exception: tissue_scores[tissue] = [[(rho,p),uid]]

def PearsonCorrelationAnalysis(uid,data_list1,tissue_template_db):
    if correlateAllGenes:
        min_rho = -2
        min_p = 1
    else:
        min_rho = 0.3
        min_p = 0.05
    for tissue in tissue_template_db:
        tissue_template = tissue_template_db[tissue]
        rho,p = rhoCalculation(data_list1,tissue_template)
        #print rho, uid, tissue
        if tissue == 'Housekeeping':
            print tissue, rho, uid;sys.exit()
        #if rho>min_rho:
        if p<min_p and rho>min_rho:
            try: tissue_scores[tissue].append([(rho,p),uid])
            except Exception: tissue_scores[tissue] = [[(rho,p),uid]]

def rhoCalculation(data_list1,tissue_template):
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides runtime warnings
            try:
                rho,p = stats.pearsonr(data_list1,tissue_template)
                return rho,p
            except Exception:
                #data_list_alt = [0 if x==None else x for x in data_list1]
                #rho,p = stats.pearsonr(data_list1,tissue_template)
                kill ### undefined name - the resulting NameError falls through to the pure-Python fallback below
    except Exception:
        rho = pearson(data_list1,tissue_template)
        return rho, 'Null'

def simpleScipyPearson(query_lists,reference_list):
    """ Get the top correlated values referenced by index of the query_lists (e.g., data matrix) """
    i=0
    rho_results=[]
    for query_list in query_lists:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides runtime warnings
            rho,p = stats.pearsonr(query_list,reference_list)
        #if query_list == reference_list: print query_list,reference_list, rho;sys.exit()
        if str(rho)!='nan':
            rho_results.append([(float(rho),float(p)),i])
        i+=1
    rho_results.sort()
    rho_results.reverse()
    return rho_results
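
### Illustrative usage (added comment; values approximate):
### simpleScipyPearson([[1,2,3],[3,2,1]],[1,2,3]) returns the row indexes ranked by
### rho, highest first, e.g. [[(1.0, 0.0), 0], [(-1.0, 0.0), 1]].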

def pearson(array1,array2):
    item = 0; sum_a = 0; sum_b = 0; sum_c = 0
    while item < len(array1):
        a = (array1[item] - avg(array1))*(array2[item] - avg(array2))
        b = math.pow((array1[item] - avg(array1)),2)
        c = math.pow((array2[item] - avg(array2)),2)
        sum_a = sum_a + a
        sum_b = sum_b + b
        sum_c = sum_c + c
        item = item + 1
    try: r = sum_a/math.sqrt(sum_b*sum_c)
    except Exception: r = 0
    return r

def avg(array):
    return sum(array)/len(array)
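
### Added note: the loop above implements the standard Pearson formula
### r = sum((x_i - mean(x))*(y_i - mean(y))) / sqrt(sum((x_i - mean(x))**2) * sum((y_i - mean(y))**2)),
### e.g. pearson([1,2,3],[3,2,1]) returns -1.0. Note that avg() performs Python 2
### integer division when all inputs are ints.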

def getArrayData(filename,filtered_probeset):
    fn=filepath(filename); x=0; k=0; expression_data={}; annotations={}; expression_data_str={}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line) ### remove the endline
        t = string.split(data,'\t')
        if x == 0:
            i=0
            for h in t:
                if 'Definition' in h: lti = i ### last tissue index
                if 'Description' in h: lti = i ### last tissue index
                i+=1
            x=1
            try:
                header = t[1:lti]
                annotation_header = t[lti:]
            except Exception:
                lti = len(t)
                header=[]
                annotation_header=[]
        else:
            probeset = t[0]
            try:
                if len(filtered_probeset)>0: ### Otherwise, just get all annotations
                    null = filtered_probeset[probeset] ### raises KeyError (skipping the row) if not in the filter
                exp_values = map(float, t[1:lti])
                if log_transform:
                    try: exp_values = map(lambda x: math.log(x,2), exp_values)
                    except Exception:
                        exp_values = map(lambda x: math.log(x+1,2), exp_values)
                expression_data_str[probeset] = map(str,exp_values)
                expression_data[probeset] = exp_values
                try: annotations[probeset] = t[lti:]
                except KeyError:
                    annotations[probeset] = []
            except KeyError:
                null=[]
    sum_tissue_exp = {}
    for probeset in expression_data:
        i=0
        for fold in expression_data[probeset]:
            try: sum_tissue_exp[i].append(fold)
            except Exception: sum_tissue_exp[i] = [fold]
            i+=1
    expression_relative={}
    for probeset in expression_data:
        i=0
        for fold in expression_data[probeset]:
            ratio = str(fold/max(sum_tissue_exp[i]))
            try: expression_relative[probeset].append(ratio)
            except Exception: expression_relative[probeset] = [ratio]
            i+=1
    return expression_data_str, annotations, header, annotation_header

def importMarkerProfiles(filename,fl):
    x=0
    ### Import correlated marker results
    fn=filepath(filename)
    marker_list = []
    condition_list=[]
    marker_db={}
    probeset_symbol_db={}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if x==0:
            x=1
        else:
            uid,symbol,rho,p,condition=t
            probeset_symbol_db[uid]=symbol
            try: marker_db[condition].append(uid)
            except Exception: marker_db[condition] = [uid]
            if condition not in condition_list:
                condition_list.append(condition)
    marker_condition_db={}
    try: condition_list = getOrderedGroups(fl.DatasetFile()) ### Order this set the same way as the samples on the opposite axis
    except Exception,e:
        #print 'failed',e
        condition_list.sort()
    condition_list.reverse() ### This makes the list ordered top-down for clustering
    for condition in condition_list:
        if condition in marker_db:
            for uid in marker_db[condition]:
                if uid not in marker_list:
                    marker_list.append(uid) ### ranked and unique marker list
                    marker_condition_db[uid] = condition
    exportMarkersForGOElite(filename,marker_db,fl) ### Export these lists for GO-Elite
    return marker_list, probeset_symbol_db, marker_condition_db

def exportMarkersForGOElite(filename,gene_db,fl):
    if fl.Vendor() == 'Affymetrix': system = 'X'
    elif fl.Vendor() == 'Agilent': system = 'Ag'
    elif fl.Vendor() == 'Illumina': system = 'Il'
    elif 'other:' in fl.Vendor():
        system = string.replace(fl.Vendor(),'other:','')
        if system == 'Symbol': system = 'Sy'
        else: system = 'Ma'
    else: system = 'Sy'
    root_dir = fl.OutputDir()
    if 'ReplicateBased' in filename: suffix = '-ReplicateBased'
    if 'MeanBased' in filename: suffix = '-MeanBased'
    for markerSet in gene_db:
        header = string.join(['Gene','System','Hit'],'\t')+'\n'
        filename = root_dir+'GO-Elite/MarkerFinder/'+markerSet+suffix+'.txt'
        export_obj = export.ExportFile(filename)
        export_obj.write(header)
        for gene in gene_db[markerSet]:
            if 'ENS' in gene:
                system = 'En'
            try: system = system ### defensive no-op; system is always defined above
            except Exception: system = 'Swiss'
            values = string.join([gene,system,'1'],'\t')+'\n'
            export_obj.write(values)
        export_obj.close()

def reorderInputFile(custom_path,marker_list,marker_condition_db):
    x=0
    ### Import correlated marker results
    fn=filepath(custom_path)
    exp_db={}
    probeset_symbol_db={}
    #print custom_path;sys.exit()
    #print fn
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if x==0:
            header = line
            x=1
        else:
            uid = t[0]
            uid2 = string.join(string.split(t[0],' ')[0:-1],' ') ### IDs can have spaces
            if '///' in uid:
                uid2 = string.split(uid,' ')[0] ### Affy ID with multiple annotations
            exp_db[uid] = line
            exp_db[uid2] = line ### The lookup below is finicky... adding both uids fixes splicing- and gene-based markerfinder analyses (without creating redundancy)
    ### Over-write the read-in file
    export_obj = export.ExportFile(custom_path)
    export_obj.write(header)
    marker_list.reverse() ### Reverse the order of the MarkerFinder results
    for uid in marker_list:
        condition = marker_condition_db[uid]
        new_uid = condition+':'+uid
        if uid in exp_db:
            export_obj.write(condition+':'+exp_db[uid])
        elif new_uid in exp_db:
            export_obj.write(exp_db[new_uid])
        else:
            """
            print [uid], len(exp_db)
            for i in exp_db:
                print [i];break
            print 'Error encountered with the ID:',uid, 'not in exp_db'; kill
            """
            pass
    export_obj.close()

def getOrderedGroups(filename):
    group_list=[]
    filename = string.replace(filename,'///','/')
    filename = string.replace(filename,'//','/')
    if 'ExpressionOutput' in filename:
        filename = string.replace(filename,'ExpressionOutput','ExpressionInput')
        filename = string.replace(filename,'-steady-state','')
        filename = string.replace(filename,'DATASET-','exp.')
    groups_dir = string.replace(filename,'exp.','groups.')
    group_names_db = ExpressionBuilder.simpleGroupImport(groups_dir)[3]
    for i in group_names_db: group_list.append(i)
    group_list.reverse()
    return group_list
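
### Illustrative path mapping (added comment; file names hypothetical): a dataset file
### such as ExpressionOutput/DATASET-cancer.txt is redirected to
### ExpressionInput/groups.cancer.txt, whose group names are returned in reversed order.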

def generateMarkerHeatMaps(fl,platform,convertNonLogToLog=False,graphics=[],Species=None):
    """ From the generated marker sets, output the replicate input data """
    from visualization_scripts import clustering
    marker_root_dir = fl.OutputDir()+'/'+'ExpressionOutput/MarkerFinder'
    #print 1,fl.DatasetFile()
    #print 2, fl.Vendor()
    for marker_dir in read_directory(marker_root_dir):
        if 'MarkerGenes' in marker_dir and 'correlation' in marker_dir:
            marker_dir = marker_root_dir+'/'+marker_dir
            marker_list, probeset_symbol_db, marker_condition_db = importMarkerProfiles(marker_dir,fl)
            custom_path = string.replace(marker_dir,'MarkerGenes','Clustering/MarkerGenes')
            """
            print fl.DatasetFile()
            print len(marker_list), marker_list[:3]
            print len(probeset_symbol_db)
            print custom_path
            print convertNonLogToLog
            print Species
            print platform
            print len(probeset_symbol_db)
            print custom_path
            print fl.Vendor()
            print convertNonLogToLog
            """
            ExpressionBuilder.exportGeometricFolds(fl.DatasetFile(),platform,marker_list,probeset_symbol_db,exportOutliers=False,exportRelative=False,customPath=custom_path,convertNonLogToLog=convertNonLogToLog)
            reorderInputFile(custom_path,marker_list,marker_condition_db)
            row_method = None; row_metric = 'cosine'; column_method = None; column_metric = 'euclidean'; color_gradient = 'yellow_black_blue'; transpose = False
            import UI
            gsp = UI.GeneSelectionParameters(Species,platform,fl.Vendor())
            gsp.setPathwaySelect('None Selected')
            gsp.setGeneSelection('')
            gsp.setOntologyID('')
            gsp.setGeneSet('None Selected')
            gsp.setJustShowTheseIDs('')
            gsp.setTranspose(False)
            gsp.setNormalize('median')
            gsp.setGeneSelection('')
            gsp.setClusterGOElite('GeneOntology')
            #gsp.setClusterGOElite('BioMarkers')
            """
            print custom_path
            print graphics
            print row_method
            print column_method
            print column_metric
            """
            reload(clustering)
            try:
                graphics = clustering.runHCexplicit(custom_path, graphics, row_method, row_metric,
                                                    column_method, column_metric, color_gradient, gsp, contrast=5, display=False)
            except Exception:
                print traceback.format_exc()
                print 'Error occurred in generating MarkerGene clusters... see the ExpressionOutput/MarkerFinder files.'
    return graphics

def reorderMultiLevelExpressionFile(input_file):
    ### Takes an input file and re-orders it based on the order in the groups file... needed for a multi-level expression file with replicates
    from import_scripts import sampleIndexSelection
    output_file = input_file[:-4]+'-output.txt'
    filter_file = string.replace(input_file,'-steady-state','')
    filter_file = string.replace(filter_file,'exp.','groups.')
    filter_file = string.replace(filter_file,'stats.','groups.')
    filter_file = string.replace(filter_file,'topSplice.','groups.')
    filter_file = string.replace(filter_file,'filter.','groups.')
    filter_names = sampleIndexSelection.getFilters(filter_file)
    sampleIndexSelection.filterFile(input_file,output_file,filter_names)
    c1 = verifyFileLength(input_file)
    c2 = verifyFileLength(output_file)
    if c1==c2:
        os.remove(input_file)
        export.copyFile(output_file, input_file)

def verifyFileLength(filename):
    count = 0
    try:
        fn=filepath(filename)
        for line in open(fn,'rU').xreadlines():
            if line[0]!='#':
                count+=1
    except Exception: null=[]
    return count

def analyzeData(filename,Species,Platform,codingType,geneToReport=60,correlateAll=True,AdditionalParameters=None,logTransform=False,binarize=False):
    global genesToReport; genesToReport = geneToReport
    global correlateAllGenes; correlateAllGenes = correlateAll
    global all_genes_ranked; all_genes_ranked={}
    global RPKM_threshold; global correlationDirection
    global Added; Added={}; global Queried; Queried={}
    """
    print 4,Platform, codingType, geneToReport, correlateAll, logTransform,
    try:
        #print AdditionalParameters.CorrelationDirection()
        print AdditionalParameters.RPKMThreshold()
    except Exception:
        print 'nope'
    """
    global AvgExpDir ### This global stored variable can persist even after markerFinder.py is reloaded
    if len(filename) == 2:
        filename, AvgExpDir = filename ### Used when there are replicate samples: avg_exp_dir is non-replicate
        if AvgExpDir==None:
            AvgExpDir = string.replace(filename,'-steady-state','')
            AvgExpDir = string.replace(AvgExpDir,'exp.','AVERAGE-')
            AvgExpDir = string.replace(AvgExpDir,'ExpressionInput','ExpressionOutput')
    else:
        AvgExpDir = None ### Delete the global stored AvgExpDir variable from previous MarkerFinder runs
    if 'ExpressionOutput' in filename:
        use_replicates = False
    else:
        use_replicates = True
    import RNASeq
    try: Platform = RNASeq.checkExpressionFileFormat(filename,Platform)
    except Exception: Platform = "3'array"
    try: RPKM_threshold = AdditionalParameters.RPKMThreshold() ### Used for exclusion of non-expressed genes
    except Exception:
        pass
    if Platform == 'RNASeq':
        try: RPKM_threshold = AdditionalParameters.RPKMThreshold() ### Used for exclusion of non-expressed genes
        except Exception: RPKM_threshold = 1; logTransform = True
    correlationDirection = 1.00 ### Correlate to a positive or negative idealized pattern
    try:
        if AdditionalParameters.CorrelationDirection() != 'up' and AdditionalParameters.CorrelationDirection() != 'positive':
            correlationDirection = -1.00
    except Exception: pass
    #print correlationDirection
    fn=filepath(filename); x=0; t2=['ID']; cluster_db={}; cluster_list = []; global coding_type; coding_type = codingType
    global cluster_comps; cluster_comps = []; global compare_clusters; compare_clusters = 'no'
    global housekeeping; housekeeping=[]; global analyze_housekeeping; analyze_housekeeping = 'no'
    global species; global platform; species = Species; platform = Platform; global log_transform
    log_transform=logTransform
    #if 'topSplice.' not in fn and 'steady' not in fn and 'AVERAGE' not in fn and 'DATASET' not in fn: reorderMultiLevelExpressionFile(fn)
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if x == 0:
            updated_names = ['ID']
            correlations = 'single'
            tissue_template_db,group_sample_db = getReplicateData(fn,t[1:])
            if '~' in data: correlations = 'multiple'
            elif t[1] in group_sample_db:
                if '~' in group_sample_db[t[1]]:
                    correlations = 'multiple'
                    for i in t[1:]:
                        updated_names.append(group_sample_db[i])
                    t = updated_names
            i=0
            for h in t:
                if correlations == 'multiple':
                    if '~' in h:
                        cluster, group_name = string.split(h,'~')
                        cluster = int(cluster)
                        try: cluster_db[cluster].append(i)
                        except Exception: cluster_db[cluster] = [i]
                i+=1
            if correlations == 'multiple':
                compare_clusters = 'yes'
                ### If there are multiple sample group clusters
                for cluster in cluster_db: cluster_list.append(cluster)
                cluster_list.sort()
                if cluster_list[0]==0: ### All clusters should then be compared to this first cluster if the first is named 0
                    cluster_comp_db={}; cluster_comp_db[0] = cluster_db[0]
                    cluster_comps.append(cluster_comp_db) ### Add the first by itself (cluster 0 samples are only compared among themselves initially)
                    for cluster in cluster_list[1:]:
                        cluster_comp_db={}; cluster_comp_db[0] = cluster_db[0]
                        cluster_comp_db[cluster] = cluster_db[cluster]
                        cluster_comps.append(cluster_comp_db) ### Each non-zero cluster is compared to cluster 0 as pairwise group combinations
                else:
                    for cluster in cluster_list:
                        cluster_comp_db={}
                        cluster_comp_db[cluster] = cluster_db[cluster]
                        cluster_comps.append(cluster_comp_db)
            x = 1
            break
    iteration=1
    if compare_clusters == 'yes':
        tissue_specific_IDs_combined={}; correlations_combined={}
        for cluster_comp_db in cluster_comps:
            ### Iterate through each comparison
            print 'Iteration',iteration,'of',len(cluster_comps)
            tissue_specific_IDs,interim_correlations,annotation_headers,tissues = identifyMarkers(filename,cluster_comp_db,binarize=binarize)
            iteration+=1
            for tissue in tissue_specific_IDs:
                if tissue not in tissue_specific_IDs_combined: ### Combine the tissue results from all of the cluster group analyses, without over-writing the existing
                    tissue_specific_IDs_combined[tissue] = tissue_specific_IDs[tissue]
                    correlations_combined[tissue] = interim_correlations[tissue]
        tissue_specific_IDs={}; interim_correlations={}
        for tissue in tissue_specific_IDs_combined:
            for probeset in tissue_specific_IDs_combined[tissue]:
                try: tissue_specific_IDs[probeset].append(tissue)
                except Exception: tissue_specific_IDs[probeset] = [tissue]
            for (probeset,symbol,(rho,p)) in correlations_combined[tissue]:
                try: interim_correlations[tissue].append([probeset,symbol,(rho,p)])
                except Exception: interim_correlations[tissue] = [[probeset,symbol,(rho,p)]]
        analyze_housekeeping = 'yes'; compare_clusters = 'no'
        original_tissue_headers2 = original_tissue_headers ### The last function will overwrite the group~ replacement
        #identifyMarkers(filename,[]) ### Used to get housekeeping genes for all conditions
    else:
        tissue_specific_IDs,interim_correlations,annotation_headers,tissues = identifyMarkers(filename,[],binarize=binarize)
        original_tissue_headers2 = original_tissue_headers
    ### Add a housekeeping set (genes that demonstrate expression with low variance)
    housekeeping.sort(); ranked_list=[]; ranked_lookup=[]; tissue = 'Housekeeping'
    for (stdev,(probeset,symbol)) in housekeeping:
        if probeset not in tissue_specific_IDs: ### Shouldn't be present if it is a housekeeping gene
            if symbol not in ranked_list:
                ranked_list.append(symbol); ranked_lookup.append([probeset,symbol,(stdev,0)])
    ### Replicates code in identifyMarkers - but only applied to housekeeping genes to add those in addition to the existing ones in tissue_specific_IDs
    for (probeset,symbol,(stdev,p)) in ranked_lookup[:genesToReport]:
        try: tissue_specific_IDs[probeset].append(tissue)
        except Exception: tissue_specific_IDs[probeset] = [tissue]
        try: interim_correlations[tissue].append([probeset,symbol,(stdev,p)])
        except Exception: interim_correlations[tissue] = [[probeset,symbol,(stdev,p)]]
    ### If no mean file is provided
    #print [use_replicates, filename, tissue]
    if use_replicates:
        if AvgExpDir != None:
            try: filename = AvgExpDir
            except Exception: pass ### For AltExon queries
    try:
        expression_relative,annotations,tissue_headers,annot_header = getArrayData(filename,tissue_specific_IDs)
        if use_replicates:
            original_tissue_headers2, annotation_headers = tissue_headers, annot_header
        tissue_specific_IDs2 = copy.deepcopy(tissue_specific_IDs)
        for probeset in tissue_specific_IDs2:
            if probeset in annotations:
                annotations[probeset]+=[string.join(list(tissue_specific_IDs[probeset]),'|')] ### Save as a string
        title_row = ['UID']+annotation_headers+['marker-in']+original_tissue_headers2
        export_dir = exportMarkerGeneProfiles(filename,annotations,expression_relative,title_row)
    except Exception,e:
        #print traceback.format_exc()
        pass
    exportCorrelations(filename,interim_correlations)
    if correlateAllGenes:
        exportAllGeneCorrelations(filename,all_genes_ranked)
    try: return export_dir
    except Exception: pass
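
### Illustrative invocation (added comment; argument values are hypothetical):
### export_dir = analyzeData('ExpressionInput/exp.cancer-steady-state.txt','Hs','RNASeq',
###     'protein_coding',geneToReport=60,correlateAll=False,logTransform=True)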

def getReplicateData(expr_input,t):
    groups_dir = string.replace(expr_input,'exp.','groups.')
    groups_dir = string.replace(groups_dir,'stats.','groups.')
    groups_dir = string.replace(groups_dir,'topSplice.','groups.')
    groups_dir = string.replace(groups_dir,'filter.','groups.')
    groups_dir = string.replace(groups_dir,'-steady-state','') ### groups is for the non-steady-state file
    #groups_dir = string.replace(groups_dir,'-average.txt','.txt') ### groups is for the non-steady-state file
    if 'groups.' not in groups_dir and 'Alt1Results' in groups_dir:
        parent_dir = string.split(expr_input,'Alt1Results')[0]
        file = export.findFilename(expr_input)
        file = string.replace(file,'AltExonConfirmed-','groups.')
        file = string.replace(file,'AltExon-','groups.')
        groups_dir = parent_dir+'ExpressionInput/'+file
    group_index_db={}
    splitHeaders=False
    for i in t:
        if '~' not in i: splitHeaders=True
    ### use comps in the future to visualize group comparison changes
    sample_list,group_sample_db,group_db,group_name_sample_db,comp_groups,comps_name_db = ExpressionBuilder.simpleGroupImport(groups_dir,splitHeaders=splitHeaders,ignoreComps=True)
    sample_list = t ### This is the actual order in the input expression files
    for x in t:
        try: group_name = group_db[x]
        except Exception:
            try:
                y = string.split(x,':')[-1] ### for an alternative exon file with a name such as wt:sample1.bed
                group_name = group_db[y]
            except Exception: pass
        try:
            group_name = group_db[x]
            sample_index = t.index(x)
            try: group_index_db[group_name].append(sample_index)
            except Exception: group_index_db[group_name] = [sample_index] ### dictionary of group to input file sample indexes
        except Exception: pass
    sample_template_db = createReplicateCorrelationTemplate(sample_list,group_index_db)
    return sample_template_db,group_sample_db

def createReplicateCorrelationTemplate(samples,group_index_db):
    ### This creates multiple binary indicators, but for replicates as opposed to an individual mean of multiple groups
    sample_template_db={}
    null_template = [0.00]*len(samples)
    for group in group_index_db:
        sample_template = list(null_template) ### creates a modifiable copy
        group_indeces = group_index_db[group]
        for index in group_indeces:
            sample_template[index] = correlationDirection ### set to -1 to invert the in silico pattern (anti-correlated)
        sample_template_db[group] = sample_template
    return sample_template_db
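
### Illustrative example (added comment): with samples ['s1','s2','s3','s4'],
### group_index_db = {'wt': [0,1], 'ko': [2,3]} and correlationDirection = 1.00,
### the result is {'wt': [1.0,1.0,0.0,0.0], 'ko': [0.0,0.0,1.0,1.0]}.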

def selectiveFloats(values):
    float_values=[]
    for i in values:
        try:
            float_values.append(float(i))
        except Exception: float_values.append(None)
    return float_values
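
### Illustrative example (added comment): selectiveFloats(['1.5','NA','2']) returns
### [1.5, None, 2.0], preserving the positions of non-numeric (missing) values.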

def binaryExp(value):
    if value>1:
        return 2
    else:
        return 0
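
### Added note: binaryExp() thresholds expression at 1 (e.g., a log2 value), mapping
### values above it to 2 and everything else to 0 - e.g. binaryExp(3.2) returns 2.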

def identifyMarkers(filename,cluster_comps,binarize=False):
    """ This function is the real workhorse of markerFinder, which coordinates the correlation analyses and data import """
    global tissue_scores; tissue_scores={}; print_interval=2000; print_limit=2000
    fn=filepath(filename); x=0; k=0; probeset_db={}; tissues_with_lowest_expression={}; index_sets = []
    global original_tissue_headers
    global use_replicates
    count=0
    import gene_associations; from import_scripts import OBO_import
    gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
    symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
    try: coding_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
    except Exception: coding_db = {}
    if 'ExpressionOutput' in filename:
        use_replicates = False
    else:
        use_replicates = True
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line) ### remove the endline
        t = string.split(data,'\t')
        if data[0] == '#':
            x = 0
        elif x == 0:
            i=0
            for h in t:
                if 'Definition' in h: lti = i ### last tissue index
                if 'Description' in h: lti = i ### last tissue index
                if 'Select Protein Classes' in h: ct = i
                i+=1
            try: original_tissue_headers = t[1:lti]
            except Exception:
                ### Occurs when analyzing a simple expression file with no annotations
                original_tissue_headers = t[1:]
            if len(cluster_comps) == 0: ### No group clusters to analyze separately are present
                tissues = list(original_tissue_headers)
            else:
                if len(cluster_comps)>1: ### Two or more group clusters to compare
                    indexes = cluster_comps[0]
                    tissues = t[indexes[0]:indexes[-1]+1]
                    index_sets = [[indexes[0],indexes[-1]+1]]
                    for cluster in cluster_comps:
                        if cluster>0:
                            indexes = cluster_comps[cluster]
                            tissues += t[indexes[0]:indexes[-1]+1]
                            index_sets.append([indexes[0],indexes[-1]+1])
                    #print tissues
                    print 'being analyzed now'
                else: ### First reference set of tissues looked at
                    for cluster in cluster_comps: ### There is only one here!
                        indexes = cluster_comps[cluster]
                        tissues = t[indexes[0]:indexes[-1]+1]
                        index_sets = [[indexes[0],indexes[-1]+1]]
                        #print tissues;sys.exit()
                        print 'being analyzed only in round 1'
            original_tissue_headers2=[]
            for tissue in original_tissue_headers: ### This is the original full header for all clusters
                try: cluster,tissue = string.split(tissue,'~')
                except Exception: pass
                original_tissue_headers2.append(tissue)
            original_tissue_headers = original_tissue_headers2
            try: annotation_headers = t[lti:]
            except Exception: annotation_headers = []
            if len(cluster_comps) > 0:
                tissues2=[]
                for tissue in tissues:
                    if '~' in tissue:
                        cluster, tissue = string.split(tissue,'~')
                    tissues2.append(tissue)
                tissues = tissues2
            #print tissues, cluster_comps;sys.exit()
            if use_replicates:
                if len(cluster_comps)>0:
                    tissue_template_db,group_sample_db = getReplicateData(fn,tissues)
                else:
                    tissue_template_db,group_sample_db = getReplicateData(fn,original_tissue_headers)
                try: annotation_db = getArrayData(AvgExpDir,[])[1]
                except Exception: pass
            else:
                tissue_template_db = createCorrelationTemplate(tissues)
            x = 1
        else: #elif x<500:
            probeset = t[0]; proceed = 'no'; symbol=''; geneID=''
            try:
                lti = len(tissues)+1
                try: description,symbol = annotation_db[probeset][:2] ### See the annotation_db import above
                except Exception: symbol = probeset; description = ''
                try:
                    probeset,geneID = string.split(probeset,':')
                    if 'ENS' in probeset:
                        geneID, probeset = probeset,geneID
                        probeset=geneID+':'+probeset
                except Exception:
                    if 'ENS' in probeset:
                        geneID = probeset
                        try: symbol = gene_to_symbol[geneID][0]; description = ''
                        except Exception: pass
            except Exception: symbol = probeset; description = ''
            try: coding_class = coding_db[probeset][-1]
            except Exception:
                try:
                    geneID = symbol_to_gene[probeset][0]
                    symbol = probeset
                    coding_class = coding_db[geneID][-1]
                except Exception:
                    coding_class = 'protein_coding'
            except Exception: pass ### duplicate handler (unreachable) retained from the original
            if symbol =='' or symbol == probeset:
                try: coding_class = t[ct]; symbol = t[lti+1]; description = t[lti]
                except Exception: coding_class = 'protein_coding'
            if coding_type == 'protein_coding':
                if coding_type in coding_class:
                    if 'MT-' not in symbol and '.' not in symbol:
                        proceed = 'yes'
            elif coding_type == 'AltExon':
                proceed = 'yes'
            else:
                if 'protein_coding' not in coding_class and 'pseudogene' not in coding_class and len(description)>0:
                    if 'MT-' not in symbol and '.' not in symbol:
                        proceed = 'yes'
            proceed = 'yes' ### Force it to analyze all genes
            count+=1
            #if coding_class != 'protein_coding':
            #print coding_class, coding_type, proceed, probeset, symbol, species, len(gene_to_symbol),coding_db[probeset];sys.exit()
            #proceed = 'yes'
            if len(coding_class) == 0 or proceed == 'yes':
                if compare_clusters == 'yes':
                    exp_values=[]
                    for (i1,i2) in index_sets:
                        try: exp_values += map(float, t[i1:i2])
                        except Exception: exp_values+=selectiveFloats(t[i1:i2])
                    #print len(exp_values), len(tissues)
                    #print exp_values
                    #print tissues; kill
                else:
                    try: exp_values = map(float, t[1:lti]) ### map applies the function to all elements in the object
                    except Exception: exp_values=selectiveFloats(t[1:lti])
                if log_transform:
                    try: exp_values = map(lambda x: math.log(x,2), exp_values)
                    except Exception:
                        exp_values = map(lambda x: math.log(x+1,2), exp_values)
                if binarize:
                    exp_values = map(lambda x: binaryExp(x), exp_values)
                if analyze_housekeeping == 'yes': ### Only grab these when analyzing all tissues
                    findHousekeepingGenes((probeset,symbol),exp_values)
                elif platform == 'RNASeq': # or platform == "3'array": ### Exclude low-expression (RPKM) genes
                    if max(exp_values)>RPKM_threshold:
                        PearsonCorrelationAnalysis((probeset,symbol),exp_values,tissue_template_db)
                    else:
                        pass
                        #print max(exp_values), RPKM_threshold;sys.exit()
                else:
                    if 'exp.' in filename:
                        try: PearsonCorrelationAnalysis((probeset,symbol),exp_values,tissue_template_db)
                        except Exception: ### For missing values
                            advancedPearsonCorrelationAnalysis((probeset,symbol),exp_values,tissue_template_db)
                    else:
                        advancedPearsonCorrelationAnalysis((probeset,symbol),exp_values,tissue_template_db)
                x+=1
                if x == print_limit:
                    #if print_limit == 2000: break
                    #print print_limit,'genes analyzed'
                    print '*',
                    print_limit+=print_interval
    #print len(Added),len(Queried),len(tissue_scores),count;sys.exit()
    tissue_specific_IDs={}; interim_correlations={}
    gene_specific_rho_values = {}
    tissue_list=[]
    for tissue in tissue_scores:
        tissue_scores[tissue].sort()
        tissue_scores[tissue].reverse()
        ranked_list=[]; ranked_lookup=[]
        if tissue not in tissue_list: tissue_list.append(tissue) ### Keep track of the tissue order
        for ((rho,p),(probeset,symbol)) in tissue_scores[tissue]:
            if correlateAllGenes:
                try: all_genes_ranked[probeset,symbol].append([(rho,p),tissue])
                except Exception: all_genes_ranked[probeset,symbol] = [[(rho,p),tissue]]
            ### Get a matrix of all gene-to-tissue correlations
            try: gene_specific_rho_values[symbol].append(rho)
            except Exception: gene_specific_rho_values[symbol] = [rho]
            if symbol == '': symbol = probeset
            #print tissue, tissue_scores[tissue];sys.exit()
            if symbol not in ranked_list:
                ranked_list.append(symbol); ranked_lookup.append([probeset,symbol,(rho,p)])
        for (probeset,symbol,(rho,p)) in ranked_lookup[:genesToReport]: ### Here is where rho values could be compared between tissues with the same probesets
            if rho>0.01 and p<0.1:
                if compare_clusters == 'yes':
                    try: tissue_specific_IDs[tissue].append(probeset)
                    except Exception: tissue_specific_IDs[tissue] = [probeset]
                else:
                    try: tissue_specific_IDs[probeset].append(tissue)
                    except Exception: tissue_specific_IDs[probeset] = [tissue]
                try: interim_correlations[tissue].append([probeset,symbol,(rho,p)])
                except Exception: interim_correlations[tissue] = [[probeset,symbol,(rho,p)]]
    if correlateAllGenes: ### This was commented out previously - an error occurs downstream otherwise
        for ID in all_genes_ranked:
            ag = all_genes_ranked[ID]
            ag.sort()
            all_genes_ranked[ID] = ag[-1] ### top correlated
    #"""
    data = export.ExportFile(string.replace(filename[:-4]+'-all-correlations.txt','exp.','MarkerFinder.'))
    data.write(string.join(tissue_list,'\t')+'\n')
    for gene in gene_specific_rho_values:
        data.write(string.join([gene]+map(str,gene_specific_rho_values[gene]),'\t')+'\n')
    #sys.exit()
    #"""
    #print len(tissue_specific_IDs);sys.exit()
    return tissue_specific_IDs,interim_correlations,annotation_headers,tissues

def exportMarkerGeneProfiles(original_filename,annotations,expression_relative,title_row):
    destination_dir = 'AltDatabase/ensembl/'+species+'/' ### Original default
    destination_dir = export.findParentDir(original_filename)
    if 'Alt1Results' in original_filename: dataset_type = '_AltExon'
    elif 'FullDatasets' in original_filename: dataset_type = '_AltExon'
    else: dataset_type = ''
    #filename = species+'_'+platform+'_tissue-specific'+dataset_type+'_'+coding_type+'.txt'
    filename = 'MarkerFinder/MarkerGenes.txt'
    try:
        if use_replicates:
            filename = string.replace(filename,'.txt','-ReplicateBased.txt')
        else:
            filename = string.replace(filename,'.txt','-MeanBased.txt')
    except Exception: None
    filename = destination_dir+filename
    filename = string.replace(filename,'ExpressionInput','ExpressionOutput')
    data = export.ExportFile(filename)
    title_row = string.join(title_row,'\t')
    data.write(title_row+'\n')
    for probeset in expression_relative:
        values = string.join([probeset]+annotations[probeset]+expression_relative[probeset],'\t')+'\n'
        data.write(values)
    data.close()
    print '\nexported:',filepath(filename)
    return filepath(filename)

def exportAllGeneCorrelations(filename,allGenesRanked):
    destination_dir = export.findParentDir(filename)
    filename = destination_dir+'MarkerFinder/AllGenes_correlations.txt'
    filename = string.replace(filename,'ExpressionInput','ExpressionOutput')
    try:
        if use_replicates:
            filename = string.replace(filename,'.txt','-ReplicateBased.txt')
        else:
            filename = string.replace(filename,'.txt','-MeanBased.txt')
    except Exception: pass
    data = export.ExportFile(filename)
    title_row = string.join(['UID','Symbol','Pearson rho','Pearson p-value','Cell State'],'\t')
    data.write(title_row+'\n')
    rho_sorted=[]
    for (probeset,symbol) in allGenesRanked:
        try:
            (rho,p),tissue = allGenesRanked[(probeset,symbol)]
        except Exception:
            ### Applies to the tiered analysis
            allGenesRanked[(probeset,symbol)].sort()
            (rho,p),tissue = allGenesRanked[(probeset,symbol)][-1]
        values = string.join([probeset,symbol,str(rho),str(p),tissue],'\t')+'\n'
        rho_sorted.append([(tissue,1.0/rho),values])
    rho_sorted.sort()
    for (x,values) in rho_sorted:
        data.write(values)
    data.close()

def exportCorrelations(original_filename,interim_correlations):
    destination_dir = 'AltDatabase/ensembl/'+species+'/'
    destination_dir = export.findParentDir(original_filename)
    if 'Alt1Results' in original_filename: dataset_type = '_AltExon'
    elif 'FullDatasets' in original_filename: dataset_type = '_AltExon'
    else: dataset_type = ''
    filename = species+'_'+platform+'_tissue-specific_correlations'+dataset_type+'_'+coding_type+'.txt'
    filename = destination_dir+filename
    filename = destination_dir+'MarkerFinder/MarkerGenes_correlations.txt'
    filename = string.replace(filename,'ExpressionInput','ExpressionOutput')
    try:
        if use_replicates:
            filename = string.replace(filename,'.txt','-ReplicateBased.txt')
        else:
            filename = string.replace(filename,'.txt','-MeanBased.txt')
    except Exception: pass
    data = export.ExportFile(filename)
    title_row = string.join(['UID','Symbol','Pearson rho','Pearson p-value','Cell State'],'\t')
    data.write(title_row+'\n')
    for tissue in interim_correlations:
        for key in interim_correlations[tissue]:
            probeset,symbol,rho_p = key
            rho,p = rho_p
            values = string.join([probeset,symbol,str(rho),str(p),tissue],'\t')+'\n'
            data.write(values)
    data.close()
    #print 'exported:',filepath(filename)

############### Second set of methods for extracting out average expression columns from initial RMA data ##########

def verifyFile(filename):
    status = 'not found'
    try:
        fn=filepath(filename)
        for line in open(fn,'rU').xreadlines(): status = 'found';break
    except Exception: status = 'not found'
    return status

def getAverageExpressionValues(filename,platform):
    """ This function imports two file sets: (A) The original raw input expression files and groups and (B) the DATASET file with annotations.