-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathFullDataPreProcessor.py
More file actions
121 lines (96 loc) · 3.91 KB
/
FullDataPreProcessor.py
File metadata and controls
121 lines (96 loc) · 3.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 20:13:45 2020
@inproceedings{xu2019modeling,
title={Modeling Tabular data using Conditional GAN},
author={Xu, Lei and Skoularidou, Maria and Cuesta-Infante, Alfredo and Veeramachaneni, Kalyan},
booktitle={Advances in Neural Information Processing Systems},
year={2019}
}
@article{torfi2020cor,
title={COR-GAN: Correlation-Capturing Convolutional Neural Networks for Generating Synthetic Healthcare Records},
author={Torfi, Amirsina and Fox, Edward A},
journal={arXiv preprint arXiv:2001.09346},
year={2020}
}
"""
# Importing libraries and frameworks
import os
import numpy as np
import torch
import pandas as pd
from ctgan.data import read_csv
class FullDataPreProcessor:
    """Load a CSV via ``ctgan.data.read_csv`` and prepare it for training.

    Missing values are replaced with type-specific sentinel values, and the
    data can optionally be restricted to a subset of columns and/or a random
    sample of rows.
    """

    def __init__(self, path, column_names, initial_identifier, num_of_rows, seed):
        # path: CSV location handed to ctgan.data.read_csv.
        # column_names: columns to keep when sub-setting ([] keeps all).
        # initial_identifier: column always kept first when sub-setting.
        # num_of_rows: number of rows to sample (-1 keeps every row).
        # seed: random_state for reproducible row sampling.
        self.path = path
        self.column_names = column_names
        self.initial_identifier = initial_identifier
        self.num_of_rows = num_of_rows
        self.seed = seed

    def get_dataframe(self):
        """Read the CSV and fill missing values with dtype-specific sentinels.

        Returns the object produced by ``ctgan.data.read_csv`` (its first
        element is the DataFrame, which is filled in place).
        """
        result = read_csv(self.path)
        frame = result[0]
        # Partition columns by dtype so each group gets its own sentinel.
        non_numeric_columns = frame.select_dtypes(exclude=["int64", "float64"]).columns
        numeric_int_columns = frame.select_dtypes(include=["int64"]).columns
        numeric_float_columns = frame.select_dtypes(include=["float64"]).columns
        # Direct column assignment instead of fillna(inplace=True) on a
        # chained selection: the original pattern raises
        # SettingWithCopyWarning and silently stops mutating the frame
        # under pandas copy-on-write.
        for col in non_numeric_columns:
            frame[col] = frame[col].fillna("emptyblock")
        for col in numeric_int_columns:
            frame[col] = frame[col].fillna(-123456789)
        for col in numeric_float_columns:
            frame[col] = frame[col].fillna(-1234.56789)
        return result

    def sample_dataframe(self, dataframe):
        """Restrict *dataframe* column-wise and/or row-wise per the config.

        Returns the (possibly) reduced DataFrame; raises AssertionError on
        an empty identifier or an out-of-range ``num_of_rows``.
        """
        # Column-wise subset: identifier column first, then the requested ones.
        if self.column_names != []:
            assert self.initial_identifier != "", (
                "Initial Identifier not specified! Choose one of the following: "
                + str(list(dataframe.columns))
            )
            pieces = [
                pd.DataFrame({self.initial_identifier: dataframe[self.initial_identifier]})
            ]
            for column in self.column_names:
                pieces.append(pd.DataFrame({column: dataframe[column]}))
            new_df = pd.concat(pieces, axis=1)
        else:
            new_df = dataframe
        # Row-wise sample: -1 means keep every row.
        if self.num_of_rows != -1:
            assert self.num_of_rows > 0, "Number of rows must be greater than zero"
            assert (
                self.num_of_rows <= dataframe.shape[0]
            ), "Number of rows must less or equal to the total number of rows"
            sampled_df = new_df.sample(self.num_of_rows, random_state=self.seed)
            # Restore original row order so the sample reads like the source.
            sampled_df.sort_index(inplace=True)
            return sampled_df
        return new_df
class Dataset:
    """Index-based dataset wrapping a 2-D array of samples.

    Each sample is clipped to [0, 1] and returned as a torch tensor,
    matching the torch ``Dataset`` protocol (``__len__``/``__getitem__``).
    """

    def __init__(self, data, transform=None):
        # Optional callable applied to each clipped sample in __getitem__.
        self.transform = transform
        # data: array-like of shape (samples, features).
        self.data = data
        self.sampleSize = data.shape[0]
        self.featureSize = data.shape[1]

    def return_data(self):
        """Return the full underlying array (not a copy)."""
        return self.data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return sample *idx*, clipped to [0, 1], as a torch tensor."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample = np.clip(self.data[idx], 0, 1)
        # Bug fix: the original accepted `transform` but never applied it
        # (the branch body was `pass`). Apply it when one is provided;
        # default behavior (transform=None) is unchanged.
        if self.transform:
            sample = self.transform(sample)
        return torch.from_numpy(sample)