path: root/old/code.py
import numpy as np
import random
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
import joblib
import matplotlib.pyplot as plt

# Shared StandardScaler instance (fitted in main(); its use in gen_classifier is commented out).
scaler = StandardScaler()

def gen_classifier(X, y):
    """Fit and return a 1-nearest-neighbour classifier on the given features."""
    #scaler.fit_transform(X)
    clf = KNeighborsClassifier(n_neighbors=1, weights='distance')
    clf.fit(X, y)
    #joblib.dump(clf, filename)
    return clf
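# Usage sketch (assumption): gen_classifier is not called anywhere in this
# script; it could be applied to the FFT features built in main(), e.g.
#   clf = gen_classifier(datas[train], labels[train])
#   print(clf.score(datas[test], labels[test]))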

def main(datas, labels):
    global scaler
    # datas, labels = loadFile('data/data3.txt')
    
    # fit_transform returns the scaled copy, which is discarded here, so this
    # call only fits the scaler; the raw samples are used below.
    scaler.fit_transform(datas)

    for i in range(len(datas)):
        #datas[i] = datas[i][1000:-300]
        # Slice each signal into 50-sample windows (samples 1050-1700) and use
        # the FFT magnitude of each window as an extra training example with
        # the same label; the 1000-1050 window replaces the original sample.
        for j in range(1050, 1700, 50):
            datas.append(np.absolute(np.fft.fft(datas[i][j:j + 50])))
            labels.append(labels[i])
        datas[i] = np.absolute(np.fft.fft(datas[i][1000:1050]))

        #datas[i] = np.absolute(np.fft.fft( datas[i] ))
    datas = np.array(datas)
    labels = np.array(labels)

    # 5-fold cross-validation with a 1-nearest-neighbour classifier; print the
    # accuracy on each held-out fold.
    kf = KFold(n_splits=5)
    for train, test in kf.split(datas):
        neigh = KNeighborsClassifier(n_neighbors=1, weights='distance')
        neigh.fit(datas[train], labels[train])
        #print(neigh.predict(datas[test]))
        #print(labels[test])
        print(neigh.score(datas[test], labels[test]))
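
# Hedged sketch (assumption): the loadFile helper referenced in main() is not
# defined in this file. The stand-in below guesses a plain-text format with one
# whitespace-separated sample per line and the class label in the last column;
# adjust it to whatever format data/data3.txt actually uses.
def loadFile(path):
    datas, labels = [], []
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            *values, label = line.split()
            datas.append([float(v) for v in values])
            labels.append(label)
    return datas, labels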


if __name__ == '__main__':
    # main() expects pre-loaded samples and labels; see the loadFile sketch above.
    main(*loadFile('data/data3.txt'))