Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
77 commits
Select commit Hold shift + click to select a range
f071377
changes for python3.6
Whadup Dec 5, 2017
6580510
bla
Whadup Dec 5, 2017
06248b0
cifar10
Whadup Dec 5, 2017
55a0f46
cifar10
Whadup Dec 5, 2017
ac3132a
kernel experiment
Whadup Dec 5, 2017
0d08d37
kernel experiment
Whadup Dec 5, 2017
40b2efe
kernel experiment
Whadup Dec 5, 2017
6b4228a
kernel experiment
Whadup Dec 5, 2017
eaa3ab9
kernel experiment
Whadup Dec 5, 2017
1395036
kernel experiment
Whadup Dec 5, 2017
bb3de95
kernel experiment
Whadup Dec 5, 2017
8a778f5
kernel experiment
Whadup Dec 5, 2017
c23f97b
new kernels
Whadup Dec 5, 2017
ed73564
new kernels
Whadup Dec 5, 2017
0c9f024
new kernels
Whadup Dec 5, 2017
b0abfee
new kernels
Whadup Dec 5, 2017
c039d7f
new kernels
Whadup Dec 5, 2017
e1107b3
new kernels
Whadup Dec 5, 2017
81d3074
new kernels
Whadup Dec 5, 2017
5d1316f
new kernels
Whadup Dec 5, 2017
342c5a3
new kernels
Whadup Dec 5, 2017
fdea7e8
new kernels
Whadup Dec 5, 2017
f9687d5
new kernels
Whadup Dec 5, 2017
c7dac7a
new kernels
Whadup Dec 5, 2017
7802865
new kernels
Whadup Dec 5, 2017
f396acd
new kernels
Whadup Dec 5, 2017
7a09b15
new kernels
Whadup Dec 5, 2017
9f146f8
new kernels
Whadup Dec 5, 2017
d307a68
new kernels
Whadup Dec 5, 2017
aa57d51
new kernels
Whadup Dec 5, 2017
6781590
new kernels
Whadup Dec 5, 2017
aabe6a2
new kernels
Whadup Dec 5, 2017
d164bf6
new kernels
Whadup Dec 6, 2017
ca889c7
new kernels
Whadup Dec 6, 2017
6d77726
new kernels
Whadup Dec 6, 2017
b31fa2d
new kernels
Whadup Dec 6, 2017
9c52922
new kernels
Whadup Dec 6, 2017
8f4dfa6
new kernels
Whadup Dec 6, 2017
e3c87eb
new kernels
Whadup Dec 6, 2017
64bd6fb
new kernels
Whadup Dec 6, 2017
b5197d8
new kernels
Whadup Dec 6, 2017
c9de74d
new kernels
Whadup Dec 6, 2017
e4cabc7
new kernels
Whadup Dec 6, 2017
7c8c844
new kernels
Whadup Dec 6, 2017
2d30eb6
new kernels
Whadup Dec 6, 2017
65c4a21
new kernels
Whadup Dec 6, 2017
2503b14
new kernels
Whadup Dec 6, 2017
f4c2f0f
new kernels
Whadup Dec 6, 2017
030b0a1
new kernels
Whadup Dec 6, 2017
a03f804
new kernels
Whadup Dec 6, 2017
88958fd
new kernels
Whadup Dec 6, 2017
6c4be48
new kernels
Whadup Dec 6, 2017
fed317a
new kernels
Whadup Dec 6, 2017
d2240cf
new kernels
Whadup Dec 6, 2017
4eae0a4
new kernels
Whadup Dec 6, 2017
08f1eda
new kernels
Whadup Dec 6, 2017
e4b3b96
new kernels
Whadup Dec 6, 2017
726637c
new kernels
Whadup Dec 6, 2017
31fcce4
new kernels
Whadup Dec 6, 2017
3b13711
new kernels
Whadup Dec 6, 2017
8e50c0b
new kernels
Whadup Dec 6, 2017
9434d3a
new kernels
Whadup Dec 6, 2017
4d3bb55
new kernels
Whadup Dec 6, 2017
00c691b
new kernels
Whadup Dec 7, 2017
d6f23c9
new kernels
Whadup Dec 8, 2017
5cfc2a1
new kernels
Whadup Dec 8, 2017
1703139
new kernels
Whadup Dec 8, 2017
d8cf26a
new kernels
Whadup Dec 8, 2017
3fc9608
new kernels
Whadup Dec 8, 2017
eb206b4
new kernels
Whadup Dec 8, 2017
71d2b0a
new kernels
Whadup Dec 8, 2017
5494139
new kernels
Whadup Dec 8, 2017
8be78df
new kernels
Whadup Dec 8, 2017
68d19bb
new kernels
Whadup Dec 8, 2017
fee6bec
new kernels
Whadup Dec 8, 2017
9b401f3
new kernels
Whadup Dec 9, 2017
45aa60f
new kernels
Whadup Dec 9, 2017
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
__pycache__
31 changes: 31 additions & 0 deletions cifar.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import keras
from keras.datasets import cifar10
import numpy as np

def load():
    """Load CIFAR-10, flatten the images, scale pixels to [0, 1], and
    one-hot encode the labels.

    Returns:
        Tuple ``(num_classes, x_train, x_test, y_train, y_test, n, D, R, d)``
        where ``n``/``D`` are the training sample/feature counts, ``R`` is
        the squared maximum l2 row norm of ``x_train``, and ``d`` is an
        even count of random features derived from ``n``.
    """
    num_classes = 10  # number of classes

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Flatten every 32x32x3 image into a single feature row.
    x_train = x_train.reshape((x_train.shape[0], -1))
    x_test = x_test.reshape((x_test.shape[0], -1))

    n, D = x_train.shape  # (n_sample, n_feature)

    # Rescale raw uint8 pixel intensities from [0, 255] into [0, 1].
    x_train = np.divide(x_train, 255.0)
    x_test = np.divide(x_test, 255.0)

    print("Load CIFAR10 dataset.")
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # Number of random features: half of n, rounded down to an even value.
    d = np.int32(n / 2) * 2

    # Convert integer class vectors to binary class matrices (one-hot).
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    R = np.max(np.linalg.norm(x_train, 2, axis=1)) ** 2
    return num_classes, x_train, x_test, y_train, y_test, n, D, R, d
103 changes: 103 additions & 0 deletions dnn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
'''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatx=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''

from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy.linalg
from numpy.linalg import svd
import mnist
import cifar
import imdb
batch_size = 16
epochs = 50
data_augmentation = False

# The data, shuffled and split between train and test sets:
num_classes,x_train,x_test,y_train,y_test,n,D,R,d = imdb.load()

model = Sequential()
#model.add(Flatten(input_shape=x_train.shape[1:]))
layer1 = Dense(256,input_shape=(x_train.shape[1],))#
#Conv2D(32, (3, 3),strides=(3,3),padding='same',
# )
model.add(layer1)
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
# layer2 = Dense(128)
#Conv2D(32, (3, 3))
# model.add(layer2)
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
#model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Dropout(0.25))
# layer3 = Dense(64)#Conv2D(64, (3, 3), padding='same')
# model.add(layer3)
# model.add(Activation('relu'))
#model.add(Dropout(0.5))
#model.add(Dense(100))
#model.add(Activation('relu'))
layer5 = Dense(num_classes)
model.add(layer5)
model.add(Activation('softmax'))
model.summary()
# initiate RMSprop optimizer
opt = keras.optimizers.sgd(lr=0.01)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])


num_classes,x_train,x_test,y_train,y_test,n,D,R,d = imdb.load()
from numpy.random import shuffle


model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
verbose=2)


# spectrum1 = svd(layer1.get_weights()[0],compute_uv=False)
# spectrum2 = svd(layer2.get_weights()[0],compute_uv=False)
# spectrum3 = svd(layer3.get_weights()[0],compute_uv=False)
# #spectrum4 = svd(numpy.reshape(layer4.get_weights()[0],(64*9,64)),compute_uv=False)
# spectrum5 = svd(layer5.get_weights()[0],compute_uv=False)

# numpy.set_printoptions(threshold=numpy.nan)
# #print(layer1.get_weights()[0].dot(layer1.get_weights()[0].transpose()))
# W = layer1.get_weights()[0].transpose()
# print(W.shape)
# print(W.dot(W.transpose()).shape)
# W = W/(numpy.linalg.norm(W,axis=1,ord=2).reshape(W.shape[0],1))
# print(-numpy.sort(-(W.dot(W.transpose())).flatten())[W.shape[0]:W.shape[0]*3:2])
# W = layer2.get_weights()[0].transpose()
# W = W/(numpy.linalg.norm(W,axis=1,ord=2).reshape(W.shape[0],1))
# print(-numpy.sort(-(W.dot(W.transpose())).flatten())[W.shape[0]:W.shape[0]*3:2])
# W = layer3.get_weights()[0].transpose()
# W = W/(numpy.linalg.norm(W,axis=1,ord=2).reshape(W.shape[0],1))
# print(-numpy.sort(-(W.dot(W.transpose())).flatten())[W.shape[0]:W.shape[0]*3:2])
# #print(spectrum4)
# W = layer5.get_weights()[0].transpose()
# W = W/(numpy.linalg.norm(W,axis=1,ord=2).reshape(W.shape[0],1))
# print(-numpy.sort(-(W.dot(W.transpose())).flatten())[W.shape[0]:W.shape[0]*3:2])

# print(spectrum1)
# print(spectrum2)
# print(spectrum3)
# print(spectrum5)

#preds = model.predict_on_batch(x_test)
#print("done",preds[0])
47 changes: 47 additions & 0 deletions imdb.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import keras
import numpy as np
from keras.datasets import imdb
from scipy.sparse import csr_matrix
def _bag_of_words(docs):
    """Encode word-index sequences as a dense term-frequency matrix.

    Each document contributes one row; every occurrence of a word index
    adds ``1/len(doc)`` to that column, so a document's entries sum to 1
    (repeated words accumulate).  The number of columns is inferred by
    ``csr_matrix`` from the largest index present.
    """
    indptr = [0]
    indices = []
    data = []
    for doc in docs:
        for index in doc:
            indices.append(index)
            # Division stays inside the loop so an empty document is
            # skipped silently, exactly as in the original code.
            data.append(1.0 / len(doc))
        indptr.append(len(indices))
    return csr_matrix((data, indices, indptr), dtype=float).toarray()


def load():
    """Load IMDB reviews as normalized bag-of-words feature vectors.

    The top-10000 vocabulary is used with the 30 most frequent words
    skipped.  Labels are one-hot encoded for binary sentiment.

    Returns:
        Tuple ``(num_classes, x_train, x_test, y_train, y_test, n, D, R, d)``
        where ``n``/``D`` are the training sample/feature counts, ``R`` is
        the squared maximum l2 row norm of ``x_train``, and ``d`` is an
        even count of random features derived from ``n``.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000,
                                                          skip_top=30,
                                                          maxlen=None)

    # The same encoding loop was previously duplicated verbatim for the
    # train and test splits (with a dead `vocabulary = {}` in each copy);
    # both now share one helper.
    x_train = _bag_of_words(x_train)
    x_test = _bag_of_words(x_test)

    print("Load IMDB dataset.")
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    print(x_train.shape[1], "features")
    print(x_train[0])

    n, D = x_train.shape  # (n_sample, n_feature)
    d = np.int32(n / 2) * 2  # number of random features (forced even)
    num_classes = 2  # binary sentiment: negative / positive

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    R = np.max(np.linalg.norm(x_train, 2, axis=1)) ** 2
    return num_classes, x_train, x_test, y_train, y_test, n, D, R, d
Loading