-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtransforms.py
More file actions
159 lines (130 loc) · 5.13 KB
/
transforms.py
File metadata and controls
159 lines (130 loc) · 5.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms, utils
import torch
import os
import numpy as np
import random as r
class VMPData(Dataset):
    """Dataset of (r, x) signal pairs loaded from ``.npy`` files.

    Every ``.npy`` file in ``path`` (except files starting with
    ``mapping``) is expected to hold an iterable of elements of the form
    ``(filename, r_reads, x_reads)``.  The index of the source file in
    the scan order becomes the integer class label for all of its samples.
    """

    def __init__(self, path, transform=None):
        """Load every sample eagerly into memory.

        :param path: directory containing the ``.npy`` data files
        :param transform: optional callable applied to each sample in
            ``__getitem__``
        """
        self.fileslist = os.listdir(path)
        # TODO: optimize this class to not read whole data at the same time
        # NOTE(review): allow_pickle=True runs the pickle machinery on load;
        # only use with trusted data files.
        self.files = [
            np.load(os.path.join(path, file), allow_pickle=True)
            for file in self.fileslist
            if file.endswith('.npy') and not file.startswith("mapping")
        ]
        self.data = []
        self.filenames = []
        self.labels = []
        self.transform = transform
        for index, filename_r_x in enumerate(self.files):
            for element in filename_r_x:
                # Label = index of the originating file.  (The original
                # wrapped the int in np.array first, which is redundant:
                # torch.tensor accepts a plain Python int.)
                self.labels.append(torch.tensor(index, dtype=torch.long))
                self.filenames.append(element[0])
                self.data.append([element[1], element[2]])

    def __getitem__(self, index):
        """Return ``[data, label, filename]``, applying ``transform`` to data."""
        if self.transform:
            return [self.transform(self.data[index]), self.labels[index], self.filenames[index]]
        return [self.data[index], self.labels[index], self.filenames[index]]

    def __len__(self):
        return len(self.data)
class VMPDataWideSlim(VMPData):
    """Dataset variant returning a (wide, slim) pair instead of (data, label).

    Fix: the original called ``self.transform`` twice per item (once for
    each output), which was wasteful and — for stochastic transforms such
    as ``RollAllChannels`` — could pair a wide sample and a slim sample
    produced by two *different* random draws.  The transform is now
    applied exactly once per access.
    """

    def __getitem__(self, index):
        """Return ``[wide, slim, filename]`` for the given sample."""
        if self.transform:
            transformed = self.transform(self.data[index])
            return [transformed[0], transformed[1], self.filenames[index]]
        # Without a transform, both outputs are the raw sample.
        return [self.data[index], self.data[index], self.filenames[index]]
class VMPDataWideSlimCNN(VMPData):
    """CNN variant: yields the (A, B) channel groups as a tuple.

    Fix: dropped the redundant ``__init__`` that merely forwarded its
    arguments unchanged to ``super().__init__`` — the inherited
    constructor is used directly.
    """

    def __getitem__(self, index):
        """Return ``(A, B, filename)``; ``transform`` maps ``[A, B] -> (A, B)``."""
        A, B = self.data[index]
        if self.transform:
            A, B = self.transform([A, B])
        return A, B, self.filenames[index]
class ToTensorCNN:
    """Stack per-channel signal lists and convert both to float32 tensors."""

    def __call__(self, data):
        # data is a pair of channel lists; each entry is a 1-D array of
        # length seq_len, one per channel.
        inputs, targets = data
        stacked_in = np.stack(inputs)    # (channels_in, seq_len)
        stacked_out = np.stack(targets)  # (channels_out, seq_len)
        return (
            torch.tensor(stacked_in, dtype=torch.float32),
            torch.tensor(stacked_out, dtype=torch.float32),
        )
class GetChannelsForWideSlimPred(object):
    """Select wide and slim channel subsets from an (r, x) reads pair.

    Fix: ``modality`` was accepted but silently discarded; it is now kept
    on the instance so callers and subclasses can inspect it.
    """

    def __init__(self, channels_wide, channels_slim, modality):
        self.channels_wide = channels_wide
        self.channels_slim = channels_slim
        # NOTE(review): modality is not used by __call__ anywhere in this
        # file — stored for introspection; confirm its intended use.
        self.modality = modality

    def __call__(self, data):
        """Return ``[[r_wide, x_wide], [r_slim, x_slim]]`` from ``[r, x]``."""
        r_reads_wide = data[0][self.channels_wide]
        x_reads_wide = data[1][self.channels_wide]
        r_reads_slim = data[0][self.channels_slim]
        x_reads_slim = data[1][self.channels_slim]
        return [[r_reads_wide, x_reads_wide], [r_reads_slim, x_reads_slim]]
class StackSignalsForWideSlimPred(object):
    """Flatten the wide and slim (r, x) channel lists into two 1-D arrays."""

    def __call__(self, data):
        wide, slim = data
        return self._flatten(wide), self._flatten(slim)

    @staticmethod
    def _flatten(pair):
        # pair == [r_channels, x_channels]: concatenate each channel list,
        # then join r followed by x into one flat signal.
        return np.hstack([np.hstack(pair[0]), np.hstack(pair[1])])
class ToTensorWideSlim(object):
    """Convert a (wide, slim) pair of arrays into float32 tensors."""

    def __call__(self, signal):
        wide, slim = signal[0], signal[1]
        return (
            torch.tensor(wide, dtype=torch.float32),
            torch.tensor(slim, dtype=torch.float32),
        )
class GetChannels(object):
    """Select a subset of channels from an (r, x) reads pair."""

    def __init__(self, channels):
        # Index (or list of indices) applied identically to r and x reads.
        self.channels = channels

    def __call__(self, data):
        reads_r, reads_x = data[0], data[1]
        return [reads_r[self.channels], reads_x[self.channels]]
class RollAllChannels(object):
    """Augmentation: circularly shift every channel by one random offset.

    With probability 0.5 the sample passes through unchanged; otherwise all
    r and x channels are rolled by the same random offset, drawn between 0
    and the zero-padding length of the first r channel.

    Fixes vs. original:
    - removed the no-op ``r_x_reads = r_x_reads`` statement;
    - local variable ``r`` renamed so it no longer shadows the module-level
      ``import random as r``;
    - off-by-one: the nonzero span is inclusive of both endpoints
      (``stop - start + 1``), so the computed padding no longer overstates
      the shiftable room by one sample;
    - an all-zero signal no longer raises IndexError (its whole length
      counts as padding).
    """

    def __call__(self, r_x_reads):
        # 50/50 chance of leaving the sample untouched.
        if np.random.rand() > .5:
            return r_x_reads
        reads_r, reads_x = r_x_reads[0], r_x_reads[1]
        # Offset is derived from the first r channel and reused everywhere
        # so all channels stay aligned.
        self._calc_to_move(reads_r[0])
        reads_r = [np.array(np.roll(sig, self.to_move)) for sig in reads_r]
        reads_x = [np.array(np.roll(sig, self.to_move)) for sig in reads_x]
        return [reads_r, reads_x]

    def _calc_to_move(self, signal):
        # Stores padding_size / to_move on self (kept for parity with the
        # original stateful design).
        self.padding_size = self._calc_padding(signal)
        self.to_move = self._draw_number_to_move()

    def _draw_number_to_move(self):
        # random.randint is inclusive on both ends: offset in [0, padding].
        return r.randint(0, self.padding_size)

    def _calc_padding(self, signal):
        """Return the number of zero-padding samples in ``signal``."""
        bounds = self._get_first_last_nonzero_index(signal)
        if bounds is None:
            # All-zero signal: every position is padding.
            return len(signal)
        start, stop = bounds
        # Inclusive span length is stop - start + 1.
        return len(signal) - (stop - start + 1)

    def _get_first_last_nonzero_index(self, array):
        """Return (first, last) nonzero indices, or None if all-zero."""
        nonzero_indxs = np.nonzero(array)[0]
        if nonzero_indxs.size == 0:
            return None
        return nonzero_indxs[0], nonzero_indxs[-1]
class StackSignals(object):
    """Concatenate all r channels, then all x channels, into one 1-D array."""

    def __call__(self, data):
        reads_r, reads_x = data[0], data[1]
        return np.hstack([np.hstack(reads_r), np.hstack(reads_x)])
class ReduceSamplingRate():
    """Downsample a signal by keeping every ``reduce_factor``-th sample."""

    def __init__(self, reduce_factor):
        self.reduce_factor = reduce_factor

    def __call__(self, data):
        # Plain strided slice; no anti-aliasing filter is applied.
        return data[::self.reduce_factor]
class Absolute(object):
    """Replace every sample of the signal with its absolute value."""

    def __call__(self, stacked_signal):
        # np.abs is the canonical alias for np.absolute.
        return np.abs(stacked_signal)
class Scale(object):
    """Multiply the whole signal by a constant factor."""

    def __init__(self, scale):
        self.scale = scale

    def __call__(self, stacked_signal):
        return self.scale * stacked_signal
class AddOffset(object):
    """Add a constant offset to every sample of the signal."""

    def __init__(self, offset):
        self.offset = offset

    def __call__(self, stacked_signal):
        # Builds and returns a plain Python list of offset samples (matches
        # original behavior, even for ndarray input).
        shifted = []
        for sample in stacked_signal:
            shifted.append(sample + self.offset)
        return shifted
class ToTensor(object):
    """Convert the final signal into a float32 torch tensor."""

    def __call__(self, signal):
        return torch.tensor(signal, dtype=torch.float32)