# required libraries
## pip3.12 install --force-reinstall pandas==2.2.2
## pip3.12 install --force-reinstall imbalanced-learn==0.13.0
## pip3.12 install --force-reinstall scikit-learn==1.5.2
## pip3.12 install --force-reinstall xgboost==2.1.3
## pip3.12 install --force-reinstall lightgbm==4.6.0
## pip3.12 install --force-reinstall catboost==1.2.8
## pip3.12 install --force-reinstall numpy==1.26.4
## pip3.12 install --force-reinstall joblib==1.5.1
## pip3.12 install --force-reinstall tqdm==4.67.1
## pip3.12 install --force-reinstall tqdm-joblib==0.0.4
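## the same pinned environment can be installed in a single command (equivalent to the lines above):
## pip3.12 install --force-reinstall pandas==2.2.2 imbalanced-learn==0.13.0 scikit-learn==1.5.2 xgboost==2.1.3 lightgbm==4.6.0 catboost==1.2.8 numpy==1.26.4 joblib==1.5.1 tqdm==4.67.1 tqdm-joblib==0.0.4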
'''
# examples of commands
## for the ADA classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph phenotype_dataset.tsv -o MyDirectory -x ADA_FirstAnalysis -da random -sp 80 -c ADA -k 5 -pa tuning_parameters_ADA.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/ADA_FirstAnalysis_model.obj -f MyDirectory/ADA_FirstAnalysis_features.obj -fe MyDirectory/ADA_FirstAnalysis_feature_encoder.obj -o MyDirectory -x ADA_SecondAnalysis
## for the CAT classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x CAT_FirstAnalysis -da manual -fs SKB -c CAT -k 5 -pa tuning_parameters_CAT.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/CAT_FirstAnalysis_model.obj -f MyDirectory/CAT_FirstAnalysis_features.obj -fe MyDirectory/CAT_FirstAnalysis_feature_encoder.obj -o MyDirectory -x CAT_SecondAnalysis
## for the DT classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x DT_FirstAnalysis -da manual -fs laSFM -c DT -k 5 -pa tuning_parameters_DT.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/DT_FirstAnalysis_model.obj -f MyDirectory/DT_FirstAnalysis_features.obj -fe MyDirectory/DT_FirstAnalysis_feature_encoder.obj -o MyDirectory -x DT_SecondAnalysis
## for the EN classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x EN_FirstAnalysis -da manual -fs enSFM -c EN -k 5 -pa tuning_parameters_EN.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/EN_FirstAnalysis_model.obj -f MyDirectory/EN_FirstAnalysis_features.obj -fe MyDirectory/EN_FirstAnalysis_feature_encoder.obj -o MyDirectory -x EN_SecondAnalysis
## for the ET classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x ET_FirstAnalysis -da manual -fs riSFM -c ET -k 5 -pa tuning_parameters_ET.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/ET_FirstAnalysis_model.obj -f MyDirectory/ET_FirstAnalysis_features.obj -fe MyDirectory/ET_FirstAnalysis_feature_encoder.obj -o MyDirectory -x ET_SecondAnalysis
## for the GB classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x GB_FirstAnalysis -da manual -fs rfSFM -c GB -k 5 -pa tuning_parameters_GB.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/GB_FirstAnalysis_model.obj -f MyDirectory/GB_FirstAnalysis_features.obj -fe MyDirectory/GB_FirstAnalysis_feature_encoder.obj -o MyDirectory -x GB_SecondAnalysis
## for the GNB classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x GNB_FirstAnalysis -da manual -fs SKB -c GNB -k 5 -pa tuning_parameters_GNB.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/GNB_FirstAnalysis_model.obj -f MyDirectory/GNB_FirstAnalysis_features.obj -fe MyDirectory/GNB_FirstAnalysis_feature_encoder.obj -o MyDirectory -x GNB_SecondAnalysis
## for the HGB classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x HGB_FirstAnalysis -da manual -fs SKB -c HGB -k 5 -pa tuning_parameters_HGB.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/HGB_FirstAnalysis_model.obj -f MyDirectory/HGB_FirstAnalysis_features.obj -fe MyDirectory/HGB_FirstAnalysis_feature_encoder.obj -o MyDirectory -x HGB_SecondAnalysis
## for the KNN classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x KNN_FirstAnalysis -da manual -fs SKB -c KNN -k 5 -pa tuning_parameters_KNN.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/KNN_FirstAnalysis_model.obj -f MyDirectory/KNN_FirstAnalysis_features.obj -fe MyDirectory/KNN_FirstAnalysis_feature_encoder.obj -o MyDirectory -x KNN_SecondAnalysis
## for the LA classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x LA_FirstAnalysis -da manual -fs SKB -c LA -k 5 -pa tuning_parameters_LA.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/LA_FirstAnalysis_model.obj -f MyDirectory/LA_FirstAnalysis_features.obj -fe MyDirectory/LA_FirstAnalysis_feature_encoder.obj -o MyDirectory -x LA_SecondAnalysis
## for the LDA classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x LDA_FirstAnalysis -da manual -fs SKB -c LDA -k 5 -pa tuning_parameters_LDA.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/LDA_FirstAnalysis_model.obj -f MyDirectory/LDA_FirstAnalysis_features.obj -fe MyDirectory/LDA_FirstAnalysis_feature_encoder.obj -o MyDirectory -x LDA_SecondAnalysis
## for the LGBM classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x LGBM_FirstAnalysis -da manual -fs SKB -c LGBM -k 5 -pa tuning_parameters_LGBM.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/LGBM_FirstAnalysis_model.obj -f MyDirectory/LGBM_FirstAnalysis_features.obj -fe MyDirectory/LGBM_FirstAnalysis_feature_encoder.obj -o MyDirectory -x LGBM_SecondAnalysis
## for the LR classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x LR_FirstAnalysis -da manual -fs SKB -c LR -k 5 -pa tuning_parameters_LR.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/LR_FirstAnalysis_model.obj -f MyDirectory/LR_FirstAnalysis_features.obj -fe MyDirectory/LR_FirstAnalysis_feature_encoder.obj -o MyDirectory -x LR_SecondAnalysis
## for the MLP classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x MLP_FirstAnalysis -da manual -fs SKB -c MLP -k 5 -pa tuning_parameters_MLP.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/MLP_FirstAnalysis_model.obj -f MyDirectory/MLP_FirstAnalysis_features.obj -fe MyDirectory/MLP_FirstAnalysis_feature_encoder.obj -o MyDirectory -x MLP_SecondAnalysis
## for the NSV classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x NSV_FirstAnalysis -da manual -fs SKB -c NSV -k 5 -pa tuning_parameters_NSV.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/NSV_FirstAnalysis_model.obj -f MyDirectory/NSV_FirstAnalysis_features.obj -fe MyDirectory/NSV_FirstAnalysis_feature_encoder.obj -o MyDirectory -x NSV_SecondAnalysis
## for the QDA classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x QDA_FirstAnalysis -da manual -fs SKB -c QDA -k 5 -pa tuning_parameters_QDA.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/QDA_FirstAnalysis_model.obj -f MyDirectory/QDA_FirstAnalysis_features.obj -fe MyDirectory/QDA_FirstAnalysis_feature_encoder.obj -o MyDirectory -x QDA_SecondAnalysis
## for the RF classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x RF_FirstAnalysis -da manual -fs SKB -c RF -k 5 -pa tuning_parameters_RF.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/RF_FirstAnalysis_model.obj -f MyDirectory/RF_FirstAnalysis_features.obj -fe MyDirectory/RF_FirstAnalysis_feature_encoder.obj -o MyDirectory -x RF_SecondAnalysis
## for the RI classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x RI_FirstAnalysis -da manual -fs SKB -c RI -k 5 -pa tuning_parameters_RI.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/RI_FirstAnalysis_model.obj -f MyDirectory/RI_FirstAnalysis_features.obj -fe MyDirectory/RI_FirstAnalysis_feature_encoder.obj -o MyDirectory -x RI_SecondAnalysis
## for the SVC classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x SVC_FirstAnalysis -da manual -fs SKB -c SVC -k 5 -pa tuning_parameters_SVC.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/SVC_FirstAnalysis_model.obj -f MyDirectory/SVC_FirstAnalysis_features.obj -fe MyDirectory/SVC_FirstAnalysis_feature_encoder.obj -o MyDirectory -x SVC_SecondAnalysis
## for the XGB classifier
python3.12 GenomicBasedClassification.py modeling -m genomic_profiles_for_modeling.tsv -ph MyDirectory/ADA_FirstAnalysis_phenotype_dataset.tsv -o MyDirectory -x XGB_FirstAnalysis -da manual -fs SKB -c XGB -k 5 -pa tuning_parameters_XGB.txt -pi -nr 5
python3.12 GenomicBasedClassification.py prediction -m genomic_profiles_for_prediction.tsv -t MyDirectory/XGB_FirstAnalysis_model.obj -f MyDirectory/XGB_FirstAnalysis_features.obj -fe MyDirectory/XGB_FirstAnalysis_feature_encoder.obj -ce MyDirectory/XGB_FirstAnalysis_class_encoder.obj -o MyDirectory -x XGB_SecondAnalysis
'''
# import packages
## standard libraries
import sys as sys # no individual installation because it is part of the Python Standard Library (no version)
import os as os # no individual installation because it is part of the Python Standard Library (no version)
import datetime as dt # no individual installation because it is part of the Python Standard Library (no version)
import argparse as ap # no individual installation because it is part of the Python Standard Library (with version)
import pickle as pi # no individual installation because it is part of the Python Standard Library (with version)
import warnings as wa # no individual installation because it is part of the Python Standard Library (no version)
import re as re # no individual installation because it is part of the Python Standard Library (with version)
import importlib.metadata as imp # no individual installation because it is part of the Python Standard Library (no version)
import functools as ft # no individual installation because it is part of the Python Standard Library (no version)
import contextlib as ctl # no individual installation because it is part of the Python Standard Library (no version)
import io as io # no individual installation because it is part of the Python Standard Library (no version)
## third-party libraries
import threadpoolctl as tpc # not part of the Python Standard Library; installed as a dependency of scikit-learn
import pandas as pd
import imblearn as imb
import sklearn as sk
import xgboost as xgb
import lightgbm as lgbm
import numpy as np
import joblib as jl
import tqdm as tq
import tqdm.auto as tqa # no version check because it is a submodule of tqdm
import tqdm_joblib as tqjl
import catboost as cb
from sklearn import set_config
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
from sklearn.model_selection import train_test_split, GridSearchCV, ParameterGrid
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix, classification_report, accuracy_score, precision_score, recall_score, f1_score, cohen_kappa_score, matthews_corrcoef, roc_auc_score, average_precision_score, precision_recall_curve
from sklearn.svm import SVC, NuSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, HistGradientBoostingClassifier, ExtraTreesClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.inspection import permutation_importance
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, chi2, f_regression, mutual_info_classif, SelectFromModel
from catboost import CatBoostClassifier, Pool
# compatibility patch: prevent GridSearchCV from injecting random_state into CatBoost
class SafeCatBoostClassifier(CatBoostClassifier):
"""a subclass of CatBoostClassifier that safely ignores sklearn random_state parameter."""
def set_params(self, **params):
# Drop sklearn’s automatic random_state injection to avoid CatBoostError
params.pop("random_state", None)
return super().set_params(**params)
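# minimal usage sketch of the patch above (hypothetical values; comments only, not executed):
# clf = SafeCatBoostClassifier(verbose=0)
# clf.set_params(random_state=42, depth=4) # random_state is silently dropped, depth is applied
# this lets GridSearchCV clone and reconfigure the estimator without the CatBoostError mentioned above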
# set static metadata to keep outside the main function
## set workflow repositories
repositories = 'Please cite:\n GitHub (https://github.com/Nicolas-Radomski/GenomicBasedClassification),\n Docker Hub (https://hub.docker.com/r/nicolasradomski/genomicbasedclassification),\n and/or Anaconda Hub (https://anaconda.org/nicolasradomski/genomicbasedclassification).'
## set the workflow context
context = "The scikit-learn (sklearn)-based Python workflow is inspired by an older caret-based R workflow (https://doi.org/10.1186/s12864-023-09667-w), independently supports both modeling (i.e., training and testing) and prediction (i.e., based on a pre-built model), and implements 5 feature selection methods, 20 model classifiers, hyperparameter tuning, performance metric computation, feature and permutation importance analyses, prediction probability estimation, execution monitoring via progress bars, and parallel processing."
## set the initial workflow reference
reference = "Pierluigi Castelli, Andrea De Ruvo, Andrea Bucciacchio, Nicola D'Alterio, Cesare Camma, Adriano Di Pasquale and Nicolas Radomski (2023) Harmonization of supervised machine learning practices for efficient source attribution of Listeria monocytogenes based on genomic data. 2023, BMC Genomics, 24(560):1-19, https://doi.org/10.1186/s12864-023-09667-w"
## set the acknowledgement
acknowledgements = "Many thanks to Andrea De Ruvo, Adriano Di Pasquale and ChatGPT for the insightful discussions that helped improve the algorithm."
# set the version and release
__version__ = "1.3.0"
__release__ = "December 2025"
# set global sklearn config early
set_config(transform_output="pandas")
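# with transform_output="pandas", scikit-learn transformers return pandas DataFrames with named
# columns instead of bare NumPy arrays (for dense outputs), e.g., OneHotEncoder(sparse_output=False)
# keeps feature labels, which is useful when reporting feature importances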
# define functions of interest
def compute_curve_metrics(y_true, y_proba_nda, classes, digits, eps=1e-8):
"""
compute per-class ROC-AUC, PR-AUC, PRG-AUC and PRG-AUC_clipped for multi-class or binary predictions.
parameters
----------
y_true : array-like, shape (n_samples,)
True class labels (can be strings or integers).
y_proba_nda : array-like, shape (n_samples, n_classes)
Predicted probabilities for each class.
Must match the order of 'classes' columns.
classes : list of length n_classes
Class labels corresponding to columns of y_proba_nda.
digits : int
Number of decimal places to round the metrics.
eps : float, default=1e-8
Small value to clip probabilities to avoid constant arrays.
returns
-------
metrics_df : pd.DataFrame, shape (n_classes, 5)
DataFrame with columns:
['phenotype', 'ROC-AUC', 'PR-AUC', 'PRG-AUC', 'PRG-AUC_clipped'].
"""
# binarize labels for multi-class
# for binary classification, label_binarize returns a single column by default
y_true_bin_nda = label_binarize(y_true, classes=classes)
# fix for binary classification
# if there are exactly 2 classes and only one column was created,
# add the complement column to match the expected 2-column format
if y_true_bin_nda.shape[1] == 1 and len(classes) == 2:
y_true_bin_nda = np.hstack([1 - y_true_bin_nda, y_true_bin_nda])
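# illustrative example of the binary fix above: with classes=['neg', 'pos'],
# label_binarize(['neg', 'pos', 'neg'], classes=['neg', 'pos']) yields [[0], [1], [0]] (one column),
# and the hstack above turns it into [[1, 0], [0, 1], [1, 0]] (one column per class, as in the multi-class case)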
# initialize results DataFrame
metrics_df = pd.DataFrame(columns=['phenotype', 'ROC-AUC', 'PR-AUC', 'PRG-AUC', 'PRG-AUC_clipped'])
# loop over each class to compute class-dependent curve-based metrics
for i, cls in enumerate(classes):
# extract binary true labels for this class
y_binary = y_true_bin_nda[:, i]
# extract predicted probabilities for this class
# clip probabilities to avoid exact 0 or 1 which may cause errors in metrics
y_scores = np.clip(y_proba_nda[:, i], eps, 1 - eps)
# compute ROC-AUC
try:
roc_auc = roc_auc_score(y_binary, y_scores)
except ValueError:
roc_auc = 0.0
# compute PR-AUC (Average Precision)
try:
pr_auc = average_precision_score(y_binary, y_scores)
except ValueError:
pr_auc = 0.0
# compute raw PRG-AUC (can be negative, zero, or positive)
try:
prg_auc, _, _, _ = compute_prg_auc(y_binary, y_scores)
except Exception:
prg_auc = 0.0
# compute clipped PRG-AUC (negative precision gains floored at 0)
try:
prg_auc_clipped, _, _, _ = compute_prg_auc(y_binary, y_scores, clip_negative=True)
except Exception:
prg_auc_clipped = 0.0
# store rounded results in the DataFrame
metrics_df.loc[i] = [
cls,
round(roc_auc, digits),
round(pr_auc, digits),
round(prg_auc, digits),
round(prg_auc_clipped, digits)
]
# return final per-class ROC-AUC, PR-AUC, PRG-AUC and PRG-AUC_clipped DataFrame
return metrics_df
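# minimal usage sketch of compute_curve_metrics (hypothetical toy values; comments only, not executed):
# y_true = ['a', 'b', 'a', 'b']
# y_proba_nda = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.4, 0.6]])
# compute_curve_metrics(y_true, y_proba_nda, classes=['a', 'b'], digits=3)
# returns one row per class with its ROC-AUC, PR-AUC, PRG-AUC and PRG-AUC_clipped values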
def compute_prg_auc(y_true, y_scores, eps=1e-12, clip_negative=False):
"""
compute the PRG curve and its AUC, following the Precision-Recall-Gain framework of Flach & Kull (2015) with linearly rescaled gains.
parameters
----------
y_true : array-like, shape (n_samples,)
True binary class labels (0 for negative, 1 for positive).
y_scores : array-like, shape (n_samples,)
Predicted probabilities for the positive class.
eps : float, default=1e-12
Small value added to avoid division by zero during gain computation.
clip_negative : bool, default=False
If True, negative precision gain values are clipped to 0 before integration.
returns
-------
auc_prg : float
Area under the Precision-Recall-Gain curve.
precision_gain : ndarray
Array of precision gain values at each threshold.
recall_gain : ndarray
Array of recall gain values at each threshold.
thresholds : ndarray
Decision thresholds corresponding to each precision-recall point.
"""
# proportion of positive samples (π)
pi = np.mean(y_true)
# compute standard precision-recall curve
precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
# avoid division by zero in precision
precision = np.maximum(precision, eps)
# compute precision gain and recall gain
precision_gain = (precision - pi) / (1 - pi)
recall_gain = 1 - (1 - recall) / (1 - pi)
# numerical safety: replace NaN/Inf and clamp to [0,1]
precision_gain = np.nan_to_num(precision_gain, nan=0.0, posinf=0.0, neginf=0.0)
recall_gain = np.nan_to_num(recall_gain, nan=0.0, posinf=0.0, neginf=0.0)
precision_gain = np.clip(precision_gain, 0.0, 1.0)
recall_gain = np.clip(recall_gain, 0.0, 1.0)
# optionally clip negative precision gains to 0
if clip_negative:
precision_gain = np.maximum(precision_gain, 0)
# ensure recall_gain is in ascending order before integration
if recall_gain[0] > recall_gain[-1]:
recall_gain = recall_gain[::-1]
precision_gain = precision_gain[::-1]
# integrate PRG curve using trapezoidal rule
auc_prg = np.trapz(precision_gain, recall_gain)
return auc_prg, precision_gain, recall_gain, thresholds
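# worked example of the gain rescaling above (hypothetical numbers): with pi = 0.5,
# a point with precision = 0.75 and recall = 0.9 gives
# precision_gain = (0.75 - 0.5) / (1 - 0.5) = 0.5 and recall_gain = 1 - (1 - 0.9) / (1 - 0.5) = 0.8,
# so a classifier no better than the positive rate (precision = pi) sits at precision_gain = 0 and a perfect one at 1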
def count_selected_features(pipeline, encoded_matrix):
"""
robust count of features the pipeline expects
returns the number of columns reaching the final estimator
handles both Pipeline objects and direct estimators
"""
# ensure the object is a pipeline; wrap standalone estimators
if not hasattr(pipeline, "named_steps"):
pipeline = Pipeline([("model", pipeline)])
# check if a feature selection step exists
if "feature_selection" in pipeline.named_steps:
fs = pipeline.named_steps["feature_selection"]
# support_ is the most reliable indicator
if hasattr(fs, "support_") and fs.support_ is not None:
return int(np.sum(fs.support_))
# convert to a NumPy float32 array (works around a CatBoost dtype issue)
try:
X_np = np.asarray(encoded_matrix, dtype=np.float32)
except Exception:
X_np = encoded_matrix.values.astype(np.float32)
# try transform on a single sample
try:
return fs.transform(X_np[:1]).shape[1]
except Exception:
pass
# fallback: try full transform
try:
return fs.transform(X_np).shape[1]
except Exception:
pass
# last resort: selector exists but unusable → full width
return int(X_np.shape[1])
# no explicit selector → check the estimator directly
est = pipeline.named_steps.get("model", pipeline)
n_feat = getattr(est, "n_features_in_", None)
# sklearn 1.3+ compatibility
if n_feat is None and hasattr(est, "feature_names_in_"):
n_feat = len(est.feature_names_in_)
# CatBoost, XGB, HGB often hide n_features_in_
if n_feat is None or n_feat == 0:
n_feat = encoded_matrix.shape[1]
return int(n_feat)
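# minimal usage sketch of count_selected_features (assumes a fitted pipeline 'best_pipeline' and a
# one-hot-encoded training matrix 'X_train_encoded'; both names are placeholders, comments only):
# n_selected = count_selected_features(best_pipeline, X_train_encoded)
# the count is valid whether a 'feature_selection' step is present, absent, or unusable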
def restricted_float_split(x: str) -> float:
"""
convert *x* to float and ensure 0 < x < 100
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as float or is not in (0, 100)
"""
try:
x = float(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid float")
if not (0.0 < x < 100.0):
raise ap.ArgumentTypeError("split must be a float in the open interval (0, 100)")
return x
def restricted_int_limit(x: str) -> int:
"""
convert *x* to int and ensure x >= 1
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as int or is less than 1
"""
try:
x = int(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid integer")
if x < 1:
raise ap.ArgumentTypeError("limit must be an integer ≥ 1")
return x
def restricted_int_fold(x: str) -> int:
"""
convert *x* to int and ensure x ≥ 2
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as int or is less than 2
"""
try:
x = int(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid integer")
if x < 2:
raise ap.ArgumentTypeError("fold must be an integer ≥ 2 for cross-validation")
return x
def restricted_int_jobs(x: str) -> int:
"""
convert *x* to int and ensure x == -1 or x ≥ 1
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as int or is not -1 or ≥ 1
"""
try:
x = int(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid integer")
if x != -1 and x < 1:
raise ap.ArgumentTypeError("jobs must be -1 (all CPUs) or an integer ≥ 1")
return x
def restricted_int_nrepeats(x: str) -> int:
"""
convert *x* to int and ensure x ≥ 1
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as int or is less than 1
"""
try:
x = int(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid integer")
if x < 1:
raise ap.ArgumentTypeError("nrepeats must be an integer ≥ 1 for permutation importance")
return x
def restricted_int_digits(x: str) -> int:
"""
convert *x* to int and ensure x ≥ 0
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as int or is negative.
"""
try:
x = int(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid integer")
if x < 0:
raise ap.ArgumentTypeError("digits must be an integer ≥ 0")
return x
def restricted_debug_level(x: str) -> int:
"""
convert *x* to int and ensure x >= 0.
raises
------
argparse.ArgumentTypeError
if *x* cannot be parsed as int or is negative.
"""
try:
x = int(x)
except ValueError:
raise ap.ArgumentTypeError(f"{x!r} is not a valid integer")
if x < 0:
raise ap.ArgumentTypeError("debug must be zero or a positive integer (0, 1, 2, ...)")
return x
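# doctest-style examples of the argument validators above (hypothetical inputs; comments only):
# restricted_float_split("80") -> 80.0, while restricted_float_split("0") raises ArgumentTypeError
# restricted_int_fold("5") -> 5, while restricted_int_fold("1") raises ArgumentTypeError
# restricted_int_jobs("-1") -> -1 (all CPUs), while restricted_int_jobs("0") raises ArgumentTypeError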
# wrap the workflow in a main function so the global scope is not executed unintentionally on import
def main():
# step control
step1_start = dt.datetime.now()
# create the main parser
parser = ap.ArgumentParser(
prog="GenomicBasedClassification.py",
description="Perform classification-based modeling or prediction from binary (e.g., presence/absence of genes) or categorical (e.g., allele profiles) genomic data.",
epilog=repositories
)
# create subparsers object
subparsers = parser.add_subparsers(dest='subcommand')
# create the parser for the "training" subcommand
## get parser arguments
parser_modeling = subparsers.add_parser('modeling', help='Help about the model building.')
## define parser arguments
parser_modeling.add_argument(
'-m', '--mutations',
dest='inputpath_mutations',
action='store',
required=True,
help='Absolute or relative input path of tab-separated values (tsv) file including profiles of mutations. First column: sample identifiers identical to those in the input file of phenotypes and datasets (header: e.g., sample). Other columns: profiles of mutations (header: labels of mutations). [MANDATORY]'
)
parser_modeling.add_argument(
'-ph', '--phenotypes',
dest='inputpath_phenotypes',
action='store',
required=True,
help="Absolute or relative input path of tab-separated values (tsv) file including profiles of phenotypes and datasets. First column: sample identifiers identical to those in the input file of mutations (header: e.g., sample). Second column: categorical phenotype (header: e.g., phenotype). Third column: 'training' or 'testing' dataset (header: e.g., dataset). [MANDATORY]"
)
parser_modeling.add_argument(
'-da', '--dataset',
dest='dataset',
type=str,
action='store',
required=False,
choices=['random', 'manual'],
default='random',
help="Perform random (i.e., 'random') or manual (i.e., 'manual') splitting of training and testing datasets through the holdout method. [OPTIONAL, DEFAULT: 'random']"
)
parser_modeling.add_argument(
'-sp', '--split',
dest='splitting',
type=restricted_float_split, # control (0, 100) open interval
action='store',
required=False,
default=None,
help='Percentage of random splitting to prepare the training dataset through the holdout method. [OPTIONAL, DEFAULT: None]'
)
parser_modeling.add_argument(
'-l', '--limit',
dest='limit',
type=restricted_int_limit, # control >= 1
action='store',
required=False,
default=10,
help='Recommended minimum of samples per class in both the training and testing datasets to reliably estimate performance metrics. [OPTIONAL, DEFAULT: 10]'
)
parser_modeling.add_argument(
'-fs', '--featureselection',
dest='featureselection',
type=str,
action='store',
required=False,
default='None',
help='Acronym of the classification-compatible feature selection method to use: SelectKBest (SKB), SelectFromModel with lasso-regularized logistic regression (laSFM), SelectFromModel with elasticNet-regularized logistic regression (enSFM), SelectFromModel with ridge-regularized logistic regression (riSFM), or SelectFromModel with random forest (rfSFM). These methods are suitable for high-dimensional binary or categorical-encoded features. [OPTIONAL, DEFAULT: None]'
)
parser_modeling.add_argument(
'-c', '--classifier',
dest='classifier',
type=str,
action='store',
required=False,
default='XGB',
help='Acronym of the classifier to use among adaboost (ADA), catboost (CAT), decision tree (DT), elasticnet-regularized logistic regression (EN), extra trees (ET), gradient boosting (GB), gaussian naive bayes (GNB), histogram-based gradient boosting (HGB), k-nearest neighbors (KNN), lasso-regularized logistic regression (LA), linear discriminant analysis (LDA), light gradient boosting machine (LGBM), logistic regression (LR), multi-layer perceptron (MLP), nu support vector (NSV), quadratic discriminant analysis (QDA), random forest (RF), ridge-regularized logistic regression (RI), support vector classification (SVC) or extreme gradient boosting (XGB). [OPTIONAL, DEFAULT: XGB]'
)
parser_modeling.add_argument(
'-k', '--fold',
dest='fold',
type=restricted_int_fold, # control >= 2
action='store',
required=False,
default=5,
help='Value defining k-1 groups of samples used to train against one group of validation through the repeated k-fold cross-validation method. [OPTIONAL, DEFAULT: 5]'
)
parser_modeling.add_argument(
'-pa', '--parameters',
dest='parameters',
action='store',
required=False,
help='Absolute or relative input path of a text (txt) file including tuning parameters compatible with the param_grid argument of the GridSearchCV function. [OPTIONAL]'
)
parser_modeling.add_argument(
'-j', '--jobs',
dest='jobs',
type=restricted_int_jobs, # control -1 or >= 1
action='store',
required=False,
default=-1,
help='Value defining the number of jobs to run in parallel compatible with the n_jobs argument of the GridSearchCV function. [OPTIONAL, DEFAULT: -1]'
)
parser_modeling.add_argument(
'-pi', '--permutationimportance',
dest='permutationimportance',
action='store_true',
required=False,
default=False,
help='Compute permutation importance, which can be computationally expensive, especially with many features and/or high repetition counts. [OPTIONAL, DEFAULT: False]'
)
parser_modeling.add_argument(
'-nr', '--nrepeats',
dest='nrepeats',
type=restricted_int_nrepeats, # control >= 1
action='store',
required=False,
default=10,
help='Number of repetitions per feature for permutation importance; higher values provide more stable estimates but increase runtime. [OPTIONAL, DEFAULT: 10]'
)
parser_modeling.add_argument(
'-o', '--output',
dest='outputpath',
action='store',
required=False,
default='.',
help='Output path. [OPTIONAL, DEFAULT: .]'
)
parser_modeling.add_argument(
'-x', '--prefix',
dest='prefix',
action='store',
required=False,
default='output',
help='Prefix of output files. [OPTIONAL, DEFAULT: output]'
)
parser_modeling.add_argument(
'-di', '--digits',
dest='digits',
type=restricted_int_digits, # control >= 0
action='store',
required=False,
default=6,
help='Number of decimal digits to round numerical results (e.g., accuracy, importance, metrics). [OPTIONAL, DEFAULT: 6]'
)
parser_modeling.add_argument(
'-de', '--debug',
dest='debug',
type=restricted_debug_level, # control >= 0
action='store',
required=False,
default=0,
help='Traceback level when an error occurs. [OPTIONAL, DEFAULT: 0]'
)
parser_modeling.add_argument(
'-w', '--warnings',
dest='warnings',
action='store_true',
required=False,
default=False,
help='Do not ignore warnings if you want to improve the script. [OPTIONAL, DEFAULT: False]'
)
parser_modeling.add_argument(
'-nc', '--no-check',
dest='nocheck',
action='store_true',
required=False,
default=False,
help='Do not check versions of Python and packages. [OPTIONAL, DEFAULT: False]'
)
# create the parser for the "prediction" subcommand
## get parser arguments
parser_prediction = subparsers.add_parser('prediction', help='Help about the model-based prediction.')
## define parser arguments
parser_prediction.add_argument(
'-m', '--mutations',
dest='inputpath_mutations',
action='store',
required=True,
help='Absolute or relative input path of a tab-separated values (tsv) file including profiles of mutations. First column: sample identifiers (header: e.g., sample). Other columns: profiles of mutations (header: labels of mutations). [MANDATORY]'
)
parser_prediction.add_argument(
'-f', '--features',
dest='inputpath_features',
action='store',
required=True,
help='Absolute or relative input path of an object (obj) file including features from the training dataset (i.e., mutations). [MANDATORY]'
)
parser_prediction.add_argument(
'-fe', '--featureencoder',
dest='inputpath_feature_encoder',
action='store',
required=True,
help='Absolute or relative input path of an object (obj) file including the feature encoder from the training dataset (i.e., mutations). [MANDATORY]'
)
parser_prediction.add_argument(
'-t', '--model',
dest='inputpath_model',
action='store',
required=True,
help='Absolute or relative input path of an object (obj) file including a trained scikit-learn model. [MANDATORY]'
)
parser_prediction.add_argument(
'-ce', '--classencoder',
dest='inputpath_class_encoder',
action='store',
required=False,
help='Absolute or relative input path of an object (obj) file including the trained scikit-learn class encoder (i.e., phenotypes) for the XGB model. [OPTIONAL]'
)
parser_prediction.add_argument(
'-o', '--output',
dest='outputpath',
action='store',
required=False,
default='.',
help='Absolute or relative output path. [OPTIONAL, DEFAULT: .]'
)
parser_prediction.add_argument(
'-x', '--prefix',
dest='prefix',
action='store',
required=False,
default='output',
help='Prefix of output files. [OPTIONAL, DEFAULT: output]'
)
parser_prediction.add_argument(
'-di', '--digits',
dest='digits',
type=restricted_int_digits, # control >= 0
action='store',
required=False,
default=6,
help='Number of decimal digits to round numerical results (e.g., accuracy, importance, metrics). [OPTIONAL, DEFAULT: 6]'
)
parser_prediction.add_argument(
'-de', '--debug',
dest='debug',
type=restricted_debug_level, # control >= 0
action='store',
required=False,
default=0,
help='Traceback level when an error occurs. [OPTIONAL, DEFAULT: 0]'
)
parser_prediction.add_argument(
'-w', '--warnings',
dest='warnings',
action='store_true',
required=False,
default=False,
help='Do not ignore warnings if you want to improve the script. [OPTIONAL, DEFAULT: False]'
)
parser_prediction.add_argument(
'-nc', '--no-check',
dest='nocheck',
action='store_true',
required=False,
default=False,
help='Do not check versions of Python and packages. [OPTIONAL, DEFAULT: False]'
)
# print help if there are no arguments in the command
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
# reshape arguments
## parse the arguments
args = parser.parse_args()
## rename arguments
if args.subcommand == 'modeling':
INPUTPATH_MUTATIONS=args.inputpath_mutations
INPUTPATH_PHENOTYPES=args.inputpath_phenotypes
OUTPUTPATH=args.outputpath
DATASET=args.dataset
SPLITTING=args.splitting
LIMIT=args.limit
FEATURESELECTION = args.featureselection
CLASSIFIER=args.classifier
FOLD=args.fold
PARAMETERS=args.parameters
JOBS=args.jobs
PERMUTATIONIMPORTANCE=args.permutationimportance
NREPEATS=args.nrepeats
PREFIX=args.prefix
DIGITS=args.digits
DEBUG=args.debug
WARNINGS=args.warnings
NOCHECK=args.nocheck
elif args.subcommand == 'prediction':
INPUTPATH_MUTATIONS=args.inputpath_mutations
INPUTPATH_FEATURES=args.inputpath_features
INPUTPATH_FEATURE_ENCODER=args.inputpath_feature_encoder
INPUTPATH_CLASS_ENCODER=args.inputpath_class_encoder
INPUTPATH_MODEL=args.inputpath_model
OUTPUTPATH=args.outputpath
PREFIX=args.prefix
DIGITS=args.digits
DEBUG=args.debug
WARNINGS=args.warnings
NOCHECK=args.nocheck
# print a message about release
message_release = "The GenomicBasedClassification script, version " + __version__ + " (released in " + __release__ + ")," + " was launched"
print(message_release)
# set tracebacklimit
sys.tracebacklimit = DEBUG
message_traceback = "The traceback level was set to " + str(sys.tracebacklimit)
print(message_traceback)
# management of warnings
if WARNINGS == True :
wa.filterwarnings('default')
message_warnings = "The warnings were not ignored"
print(message_warnings)
elif WARNINGS == False :
wa.filterwarnings('ignore')
message_warnings = "The warnings were ignored"
print(message_warnings)
# control versions
if NOCHECK == False :
## control Python version
if sys.version_info[0] != 3 or sys.version_info[1] != 12 :
raise Exception("Python 3.12 version is recommended")
# control versions of packages
if ap.__version__ != "1.1":
raise Exception("argparse 1.1 (1.4.1) version is recommended")
if pi.format_version != "4.0":
raise Exception("pickle 4.0 version is recommended")
if pd.__version__ != "2.2.2":
raise Exception("pandas 2.2.2 version is recommended")
if imb.__version__ != "0.13.0":
raise Exception("imblearn 0.13.0 version is recommended")
if sk.__version__ != "1.5.2":
raise Exception("sklearn 1.5.2 version is recommended")
if xgb.__version__ != "2.1.3":
raise Exception("xgboost 2.1.3 version is recommended")
if np.__version__ != "1.26.4":
raise Exception("numpy 1.26.4 version is recommended")
if jl.__version__ != "1.5.1":
raise Exception("joblib 1.5.1 version is recommended")
if lgbm.__version__ != "4.6.0":
raise Exception("lightgbm 4.6.0 version is recommended")
if tq.__version__ != "4.67.1":
raise Exception("tqdm 4.67.1 version is recommended")
if imp.version("tqdm-joblib") != "0.0.4":
raise Exception("tqdm-joblib 0.0.4 version is recommended")
if imp.version("catboost") != "1.2.8":
raise Exception("catboost 1.2.8 version is recommended")
message_versions = "The recommended versions of Python and packages were properly controlled"
elif NOCHECK == True :
message_versions = "The recommended versions of Python and packages were not controlled"
# print a message about version control
print(message_versions)
# set rounded digits
digits = DIGITS
# check the subcommand and execute corresponding code
if args.subcommand == 'modeling':
# print a message about subcommand
message_subcommand = "The modeling subcommand was used"
print(message_subcommand)
# manage minimal limits of samples
if LIMIT < 10:
message_limit = (
"The provided sample limit per class and dataset (i.e., " + str(LIMIT) + ") was below the recommended minimum (i.e., 10) and may lead to unreliable performance metrics"
)
print(message_limit)
else:
message_limit = (
"The provided sample limit per class and dataset (i.e., " + str(LIMIT) + ") meets or exceeds the recommended minimum (i.e., 10), which is expected to support more reliable performance metrics"
)
print(message_limit)
# read input files
## mutations
df_mutations = pd.read_csv(INPUTPATH_MUTATIONS, sep='\t', dtype=str)
## phenotypes
df_phenotypes = pd.read_csv(INPUTPATH_PHENOTYPES, sep='\t', dtype=str)
# identify the type of phenotype classes
## make sure that the phenotype is provided in the second column
if df_phenotypes.shape[1] < 2:
message_number_phenotype_classes = "The presence of phenotype in the input file of phenotypes was improperly controlled (i.e., the second column is missing)"
raise Exception(message_number_phenotype_classes)
## count the phenotype classes
### count each phenotype class
counts_each_classes_series = df_phenotypes.groupby(df_phenotypes.columns[1]).size()
### count classes
counts_classes_int = len(counts_each_classes_series.index)
### retrieve phenotype classes as string
classes_str = str(counts_each_classes_series.index.astype(str).tolist()).replace("[", "").replace("]", "")
### define the type of phenotype classes
if counts_classes_int == 2:
type_phenotype_classes = 'two classes'
message_number_phenotype_classes = "The provided phenotype harbored " + str(counts_classes_int) + " classes: " + classes_str
print(message_number_phenotype_classes)
elif counts_classes_int > 2:
type_phenotype_classes = 'more than two classes'
message_number_phenotype_classes = "The provided phenotype harbored " + str(counts_classes_int) + " classes: " + classes_str
print(message_number_phenotype_classes)
elif counts_classes_int == 1:
message_number_phenotype_classes = "The provided phenotype classes must be higher or equal to two"
raise Exception(message_number_phenotype_classes)
# define the minimal limit of samples (i.e., 2 datasets * counts_classes_int classes * LIMIT samples per class)
limit_samples = 2 * counts_classes_int * LIMIT
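# worked example of the formula above: with 3 phenotype classes and the default LIMIT of 10,
# limit_samples = 2 * 3 * 10 = 60 samples are required overall (training and testing datasets combined)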
# check the input file of mutations
## calculate the number of rows
rows_mutations = len(df_mutations)
## calculate the number of columns
columns_mutations = len(df_mutations.columns)
## check if more than limit_samples rows and 3 columns
if (rows_mutations >= limit_samples) and (columns_mutations >= 3):
message_input_mutations = "The minimum required number of samples in the training/testing datasets (i.e., >= " + str(limit_samples) + ") and the expected number of columns (i.e., >= 3) in the input file of mutations were properly controlled (i.e., " + str(rows_mutations) + " and " + str(columns_mutations) + " , respectively)"
print (message_input_mutations)
else:
message_input_mutations = "The minimum required number of samples in the training/testing datasets (i.e., >= " + str(limit_samples) + ") and the expected number of columns (i.e., >= 3) in the input file of mutations were not properly controlled (i.e., " + str(rows_mutations) + " and " + str(columns_mutations) + " , respectively)"
raise Exception(message_input_mutations)
# check the input file of phenotypes
## calculate the number of rows
rows_phenotypes = len(df_phenotypes)
## calculate the number of columns
columns_phenotypes = len(df_phenotypes.columns)
## check if more than limit_samples rows and 3 columns
if (rows_phenotypes >= limit_samples) and (columns_phenotypes == 3):
message_input_phenotypes = "The minimum required number of samples in the training/testing datasets (i.e., >= " + str(limit_samples) + ") and the expected number of columns (i.e., = 3) in the input file of phenotypes were properly controlled (i.e., " + str(rows_phenotypes) + " and " + str(columns_phenotypes) + " , respectively)"
print (message_input_phenotypes)
else:
message_input_phenotypes = "The minimum required number of samples in the training/testing datasets (i.e., >= " + str(limit_samples) + ") and the expected number of columns (i.e., = 3) in the input file of phenotypes were not properly controlled (i.e., " + str(rows_phenotypes) + " and " + str(columns_phenotypes) + " , respectively)"
raise Exception(message_input_phenotypes)
## check the absence of missing data in the second column (i.e., phenotype)
missing_phenotypes = pd.Series(df_phenotypes.iloc[:,1]).isnull().values.any()
if missing_phenotypes == False:
message_missing_phenotypes = "The absence of missing phenotypes in the input file of phenotypes was properly controlled (i.e., the second column)"
print (message_missing_phenotypes)
elif missing_phenotypes == True:
message_missing_phenotypes = "The absence of missing phenotypes in the input file of phenotypes was improperly controlled (i.e., the second column)"
raise Exception(message_missing_phenotypes)
## check the absence of values other than 'training' or 'testing' in the third column (i.e., dataset)
if (DATASET == "manual"):
expected_datasets = all(df_phenotypes.iloc[:,2].isin(["training", "testing"]))
if expected_datasets == True:
message_expected_datasets = "The expected datasets (i.e., 'training' or 'testing') in the input file of phenotypes were properly controlled (i.e., the third column)"
print (message_expected_datasets)
elif expected_datasets == False:
message_expected_datasets = "The expected datasets (i.e., 'training' or 'testing') in the input file of phenotypes were improperly controlled (i.e., the third column)"
raise Exception(message_expected_datasets)
elif (DATASET == "random"):
message_expected_datasets = "The expected datasets (i.e., 'training' or 'testing') in the input file of phenotypes were not controlled (i.e., the third column)"
print (message_expected_datasets)
# replace missing genomic data by a string
df_mutations = df_mutations.fillna('missing')
# rename variables of headers
## mutations
df_mutations.rename(columns={df_mutations.columns[0]: 'sample'}, inplace=True)
## phenotypes
df_phenotypes.rename(columns={df_phenotypes.columns[0]: 'sample'}, inplace=True)
df_phenotypes.rename(columns={df_phenotypes.columns[1]: 'phenotype'}, inplace=True)
df_phenotypes.rename(columns={df_phenotypes.columns[2]: 'dataset'}, inplace=True)
# sort by samples
## mutations
df_mutations = df_mutations.sort_values(by='sample')
## phenotypes
df_phenotypes = df_phenotypes.sort_values(by='sample')
# check if lists of sorted samples are identical
## convert DataFrame column as a list
lst_mutations = df_mutations['sample'].tolist()
lst_phenotypes = df_phenotypes['sample'].tolist()
## compare lists
if lst_mutations == lst_phenotypes:
message_sample_identifiers = "The sorted sample identifiers were confirmed as identical between the input files of mutations and phenotypes/datasets"
print (message_sample_identifiers)
else:
message_sample_identifiers = "The sorted sample identifiers were confirmed as not identical between the input files of mutations and phenotypes/datasets"
raise Exception(message_sample_identifiers)
# transform the phenotype classes into phenotype numbers for the XGB model
if CLASSIFIER == 'XGB':
class_encoder = LabelEncoder()
df_phenotypes["phenotype"] = class_encoder.fit_transform(df_phenotypes["phenotype"])
encoded_classes = class_encoder.classes_
message_class_encoder = "The phenotype classes were encoded for the XGB classifier (i.e., 0, 1, 2 ....): " + str(", ".join(f"'{item}'" for item in encoded_classes))
print(message_class_encoder)
else:
message_class_encoder = "The phenotype classes were not encoded for the classifiers other than the XGB classifier"
print(message_class_encoder)
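# illustrative example of the encoding above (hypothetical labels): with phenotypes ['cattle', 'pig', 'poultry'],
# LabelEncoder().fit_transform(...) maps them to [0, 1, 2] in alphabetical order, and
# class_encoder.classes_ keeps the original labels so predictions can be decoded later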
# check compatibility between the dataset and splitting arguments
if (DATASET == 'random') and (SPLITTING != None):
message_compatibility_dataset_splitting = "The provided selection of training/testing datasets (i.e., " + DATASET + ") and percentage of random splitting (i.e., " + str(SPLITTING) + "%) were compatible"
print(message_compatibility_dataset_splitting)
elif (DATASET == 'random') and (SPLITTING == None):
message_compatibility_dataset_splitting = "The provided selection of training/testing datasets (i.e., " + DATASET + ") requires a percentage of random splitting (i.e., -sp/--split), but none was provided"
raise Exception(message_compatibility_dataset_splitting)
elif (DATASET == 'manual') and (SPLITTING == None):
message_compatibility_dataset_splitting = "The provided selection of training/testing datasets (i.e., " + DATASET + ") and percentage of random splitting (i.e., " + str(SPLITTING) + ") were compatible"
print(message_compatibility_dataset_splitting)
elif (DATASET == 'manual') and (SPLITTING != None):
message_compatibility_dataset_splitting = "The provided selection of training/testing datasets (i.e., " + DATASET + ") does not require a percentage of random splitting (i.e., " + str(SPLITTING) + "%)"
raise Exception(message_compatibility_dataset_splitting)
# perform splitting of the training and testing datasets according to the setting
if DATASET == 'random':
message_dataset = "The training and testing datasets were constructed based on the 'random' setting"
print(message_dataset)
# drop dataset column (since it's not needed)
df_phenotypes = df_phenotypes.drop("dataset", axis='columns')
# merge phenotypes and mutations deterministically
df_all = (
pd.merge(df_phenotypes, df_mutations, on="sample", how="inner")
.sort_values(by="sample")
.reset_index(drop=True)
)
# do not normalize df_all["dataset"] here because it does not exist for random split
# create the dataframes mutations (X) and phenotypes (y)
X = df_all.drop(columns=["phenotype"]) # keep "sample" for now
y = df_all[["sample", "phenotype"]] # include sample column explicitly
# index with sample identifiers
X.set_index("sample", inplace=True)
y.set_index("sample", inplace=True)
# split the dataset into training and testing sets (no fixed random_state, so the split differs between runs)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, train_size=SPLITTING / 100
)
# convert y_train and y_test into clean 1-D Series indexed by sample
y_train = y_train["phenotype"]
y_test = y_test ["phenotype"]
elif DATASET == 'manual':
message_dataset = "The training and testing datasets were constructed based on the 'manual' setting"
print(message_dataset)
# merge phenotypes and mutations deterministically
df_all = (
pd.merge(df_phenotypes, df_mutations, on="sample", how="inner")
.sort_values(by="sample")
.reset_index(drop=True)
)
# normalize only here (since dataset column exists)
df_all["dataset"] = df_all["dataset"].astype(str).str.strip().str.lower()
# split according to dataset column
df_training = df_all[df_all["dataset"] == "training"]
df_testing = df_all[df_all["dataset"] == "testing"]
# build X and y dataframes for training/testing
## extract genomic features and set sample identifiers as index
X_train = df_training.drop(columns=["phenotype", "dataset"]).set_index("sample")
X_test = df_testing.drop(columns=["phenotype", "dataset"]).set_index("sample")
## extract phenotype as a clean 1-D Series indexed by sample
y_train = df_training[["sample", "phenotype"]].set_index("sample")["phenotype"]
y_test = df_testing[["sample", "phenotype"]].set_index("sample")["phenotype"]
# check number of samples per class
## retrieve a list of unique classes
### transform a dataframe column into a list
classes_unique_lst = df_phenotypes['phenotype'].tolist()
### remove replicates
classes_unique_lst = list(set(classes_unique_lst))
### sort by alphabetic order
classes_unique_lst.sort()
## count classes
### in the whole dataset
count_dataset_lst = df_phenotypes['phenotype'].value_counts().reindex(classes_unique_lst, fill_value=0).tolist()
### in the training dataset
#### ensure y_train behaves as 1-D phenotype Series
if isinstance(y_train, pd.DataFrame):
y_train_series = y_train["phenotype"]
else:
y_train_series = y_train