-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathITrackerData.py
More file actions
162 lines (125 loc) · 5.78 KB
/
ITrackerData.py
File metadata and controls
162 lines (125 loc) · 5.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
import torch.utils.data as data
import scipy.io as sio
from PIL import Image
import os
import os.path
import torchvision.transforms as transforms
import torch
import numpy as np
import re
'''
Data loader for the iTracker.
Use prepareDataset.py to convert the dataset from http://gazecapture.csail.mit.edu/ to proper format.
Author: Petr Kellnhofer ( pkel_lnho (at) gmai_l.com // remove underscores and spaces), 2018.
Website: http://gazecapture.csail.mit.edu/
Cite:
Eye Tracking for Everyone
K.Krafka*, A. Khosla*, P. Kellnhofer, H. Kannan, S. Bhandarkar, W. Matusik and A. Torralba
IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016
@inproceedings{cvpr2016_gazecapture,
Author = {Kyle Krafka and Aditya Khosla and Petr Kellnhofer and Harini Kannan and Suchendra Bhandarkar and Wojciech Matusik and Antonio Torralba},
Title = {Eye Tracking for Everyone},
Year = {2016},
Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}
}
'''
MEAN_PATH = './'
def loadMetadata(filename, silent = False):
    """Load a MATLAB .mat metadata file.

    Args:
        filename: Path to the .mat file.
        silent: When True, suppress the progress message.

    Returns:
        The dict produced by scipy.io.loadmat (squeeze_me=True,
        struct_as_record=False), or None when the file cannot be read.
    """
    try:
        # http://stackoverflow.com/questions/6273634/access-array-contents-from-a-mat-file-loaded-using-scipy-io-loadmat-python
        if not silent:
            print('\tReading metadata from %s...' % filename)
        metadata = sio.loadmat(filename, squeeze_me=True, struct_as_record=False)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; failure remains best-effort (returns None).
        print('\tFailed to read the meta file "%s"!' % filename)
        return None
    return metadata
class SubtractMean(object):
    """Subtract a fixed mean image from a tensor image.

    The mean image is supplied in 8-bit range (0-255) and is rescaled to
    [0, 1] so it is comparable with tensors produced by ToTensor().
    """

    def __init__(self, meanImg):
        # Rescale once and convert the HxWxC array to a CxHxW tensor up
        # front, so each __call__ is a single subtraction.
        scaled = meanImg / 255
        self.meanImg = transforms.ToTensor()(scaled)

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: Normalized image.
        """
        result = tensor.sub(self.meanImg)
        return result
class ITrackerData(data.Dataset):
    """Dataset of face/eye crops and gaze targets for iTracker.

    Expects the directory layout produced by prepareDataset.py:
    `<dataPath>/metadata.mat` plus per-recording folders containing
    appleFace/appleLeftEye/appleRightEye JPEG crops.
    """

    def __init__(self, dataPath, split = 'train', imSize=(224,224), gridSize=(25, 25)):
        """
        Args:
            dataPath: Root directory of the prepared dataset.
            split: One of 'train', 'val', 'test'; selects the record mask.
            imSize: (H, W) size the crops are resized to.
            gridSize: Dimensions of the binary face-location grid.

        Raises:
            RuntimeError: If metadata.mat is missing or unreadable.
        """
        self.dataPath = dataPath
        self.imSize = imSize
        self.gridSize = gridSize

        print('Loading iTracker dataset...')
        metaFile = os.path.join(dataPath, 'metadata.mat')
        # os.path.join always returns a string, so only the existence check
        # is needed (the original `metaFile is None` test was dead code).
        if not os.path.isfile(metaFile):
            raise RuntimeError('There is no such file %s! Provide a valid dataset path.' % metaFile)
        self.metadata = loadMetadata(metaFile)
        if self.metadata is None:
            raise RuntimeError('Could not read metadata file %s! Provide a valid dataset path.' % metaFile)

        # Per-stream mean images (stored in 0-255 range) used for
        # normalization by SubtractMean below.
        self.faceMean = loadMetadata(os.path.join(MEAN_PATH, 'mean_face_224.mat'))['image_mean']
        self.eyeLeftMean = loadMetadata(os.path.join(MEAN_PATH, 'mean_left_224.mat'))['image_mean']
        self.eyeRightMean = loadMetadata(os.path.join(MEAN_PATH, 'mean_right_224.mat'))['image_mean']

        self.transformFace = transforms.Compose([
            transforms.Resize(self.imSize),
            transforms.ToTensor(),
            SubtractMean(meanImg=self.faceMean),
        ])
        self.transformEyeL = transforms.Compose([
            transforms.Resize(self.imSize),
            transforms.ToTensor(),
            SubtractMean(meanImg=self.eyeLeftMean),
        ])
        self.transformEyeR = transforms.Compose([
            transforms.Resize(self.imSize),
            transforms.ToTensor(),
            SubtractMean(meanImg=self.eyeRightMean),
        ])

        # Pick the boolean record mask for the requested split; anything
        # other than 'test'/'val' falls back to the training split.
        if split == 'test':
            mask = self.metadata['labelTest']
        elif split == 'val':
            mask = self.metadata['labelVal']
        else:
            mask = self.metadata['labelTrain']

        # Flat indices of the records belonging to this split.
        self.indices = np.argwhere(mask)[:,0]
        print('Loaded iTracker dataset split "%s" with %d records...' % (split, len(self.indices)))

    def loadImage(self, path):
        """Open the image at `path` as RGB; raise RuntimeError if unreadable."""
        try:
            im = Image.open(path).convert('RGB')
        except OSError:
            raise RuntimeError('Could not read image: ' + path)
        return im

    def makeGrid(self, params):
        """Build a flattened binary face-location grid from [x, y, w, h].

        Cells covered by the face bounding box (given in grid units) are
        set to 1, all others 0. The grid is row-major with row width
        gridSize[0].

        Returns:
            np.ndarray of shape (gridSize[0]*gridSize[1],), dtype float32.
        """
        gridLen = self.gridSize[0] * self.gridSize[1]
        grid = np.zeros([gridLen,], np.float32)

        # Row-major cell coordinates of every flat index.
        # NOTE(review): gridSize[0] is used as the row width for both axes;
        # correct for the square 25x25 default — confirm before using a
        # non-square grid.
        indsY = np.array([i // self.gridSize[0] for i in range(gridLen)])
        indsX = np.array([i % self.gridSize[0] for i in range(gridLen)])
        condX = np.logical_and(indsX >= params[0], indsX < params[0] + params[2])
        condY = np.logical_and(indsY >= params[1], indsY < params[1] + params[3])
        cond = np.logical_and(condX, condY)

        grid[cond] = 1
        return grid

    def __getitem__(self, index):
        """Return (row, imFace, imEyeL, imEyeR, faceGrid, gaze) for one record."""
        index = self.indices[index]

        # Hoist the repeated metadata lookups shared by all three crop paths.
        recNum = self.metadata['labelRecNum'][index]
        frameIndex = self.metadata['frameIndex'][index]
        imFacePath = os.path.join(self.dataPath, '%05d/appleFace/%05d.jpg' % (recNum, frameIndex))
        imEyeLPath = os.path.join(self.dataPath, '%05d/appleLeftEye/%05d.jpg' % (recNum, frameIndex))
        imEyeRPath = os.path.join(self.dataPath, '%05d/appleRightEye/%05d.jpg' % (recNum, frameIndex))

        imFace = self.loadImage(imFacePath)
        imEyeL = self.loadImage(imEyeLPath)
        imEyeR = self.loadImage(imEyeRPath)

        imFace = self.transformFace(imFace)
        imEyeL = self.transformEyeL(imEyeL)
        imEyeR = self.transformEyeR(imEyeR)

        # Gaze target in camera-space coordinates (x, y).
        gaze = np.array([self.metadata['labelDotXCam'][index], self.metadata['labelDotYCam'][index]], np.float32)

        faceGrid = self.makeGrid(self.metadata['labelFaceGrid'][index,:])

        # to tensor
        row = torch.LongTensor([int(index)])
        faceGrid = torch.FloatTensor(faceGrid)
        gaze = torch.FloatTensor(gaze)

        return row, imFace, imEyeL, imEyeR, faceGrid, gaze

    def __len__(self):
        """Number of records in the selected split."""
        return len(self.indices)