-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathsample_regression.py
More file actions
84 lines (65 loc) · 2.69 KB
/
sample_regression.py
File metadata and controls
84 lines (65 loc) · 2.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
#!/usr/bin/env python
# coding: utf-8
import sys, io, os, math, torch, csv, time
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch import nn
import torch.autograd as autograd
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable, grad
from qulacs import QuantumState, Observable, QuantumCircuit, ParametricQuantumCircuit
from qulacs.gate import U1,U2,U3,RandomUnitary
import base.encoder as enc
from base.encoder import QulacsEncoderFactory
from base.model_regression import qcNN, cNN, qNN, count_obs, xavier_init
from base.data_gen_regression import DataGen
from base.func import get_rmse, torch_fix_seed
start = time.time()

## set params ##
seed = 0          # global RNG seed (also names the output directory)
n0 = 1000         # width parameter passed to the classical network parts
M_train = 1000    # number of training samples
it_end = 1000     # number of training iterations per model

### set random seed ###
torch_fix_seed(seed=seed)

# Output directory for this seed. makedirs(exist_ok=True) replaces the
# check-then-mkdir pattern, which is race-prone and fails on reruns.
path = 'out_{}'.format(seed)
os.makedirs(path, exist_ok=True)

### main task ###
# Train and evaluate three models (qcNN, cNN, qNN) for each qubit count,
# then write the learn/test RMSE values to one CSV file per qubit count.
for nqubit in [2,3,4,5]:
    print("----- n = {} -----".format(nqubit))
    ## Data preparation ##
    dgen = DataGen(nqubit=nqubit, M=M_train, encoder="manual_haar", seed=seed)
    x_train, y_train, x_test, y_test, gen_enc = dgen.gen_data()
    ### Learning ######
    # qcNN #
    model_qc = qcNN(nqubit=nqubit, M=M_train, n0=n0, seed=seed, gen_enc=gen_enc)
    loss_learn_qc, y_pred_learn_qc = model_qc.learn(x_train, y_train, it_end=it_end)
    rmse_learn_qc = get_rmse(loss_learn_qc, "learn", "qcNN")
    # cNN #
    model_c = cNN(nqubit=nqubit, n0=n0, seed=seed)
    loss_learn_c, y_pred_learn_c = model_c.learn(x_train, y_train, it_end=it_end)
    rmse_learn_c = get_rmse(loss_learn_c, "learn", "cNN")
    # qNN #
    model_q = qNN(nqubit=nqubit, Lq=10, seed=seed)
    loss_learn_q, y_pred_learn_q = model_q.learn(x_train, y_train, it_end=it_end)
    rmse_learn_q = get_rmse(loss_learn_q, "learn", "qNN")
    ### Testing ######
    # qcNN #
    loss_test_qc, y_pred_test_qc = model_qc.test(x_test, y_test)
    rmse_test_qc = get_rmse(loss_test_qc, "test", "qcNN")
    # cNN #
    loss_test_c, y_pred_test_c = model_c.test(x_test, y_test)
    rmse_test_c = get_rmse(loss_test_c, "test", "cNN")
    # qNN #
    loss_test_q, y_pred_test_q = model_q.test(x_test, y_test)
    rmse_test_q = get_rmse(loss_test_q, "test", "qNN")
    # Reuse the precomputed `path` instead of re-formatting the directory name.
    # newline='' is required by the csv module to avoid blank rows on Windows;
    # an explicit encoding keeps the output platform-independent.
    with open('{}/rmse_n{}.csv'.format(path, nqubit), 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['rmse_learn_qc', 'rmse_learn_c', 'rmse_learn_q', 'rmse_test_qc', 'rmse_test_c', 'rmse_test_q'])
        writer.writerow([rmse_learn_qc, rmse_learn_c, rmse_learn_q, rmse_test_qc, rmse_test_c, rmse_test_q])
print("Finish !")
print(f'elapsed time: {time.time() - start:.1f}s')