• R/O
  • HTTP
  • SSH
  • HTTPS

ultron: Commit

高橋君


Commit MetaInfo

Revisão: 18722e5eda77bb3084097958caaf4cb7f882ff98 (tree)
Hora: 2019-01-19 17:30:50
Autor: shuepluter <shupeluter@hotm...>
Commiter: shuepluter

Mensagem de Log

change

Sumário de Mudanças

Diff

--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,7 @@
11 <?xml version="1.0" encoding="UTF-8"?>
22 <project version="4">
33 <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6 (Sample)" project-jdk-type="Python SDK" />
4+ <component name="PyCharmProfessionalAdvertiser">
5+ <option name="shown" value="true" />
6+ </component>
47 </project>
\ No newline at end of file
--- a/.idea/ultreron.iml
+++ b/.idea/ultreron.iml
@@ -5,6 +5,7 @@
55 <sourceFolder url="file://$MODULE_DIR$/src/main/Python" isTestSource="false" />
66 <sourceFolder url="file://$MODULE_DIR$/config" isTestSource="false" />
77 <sourceFolder url="file://$MODULE_DIR$/src/main/Python/config" isTestSource="false" />
8+ <sourceFolder url="file://$MODULE_DIR$/src/main/Python/api" isTestSource="false" />
89 </content>
910 <orderEntry type="jdk" jdkName="Python 3.6 (Sample)" jdkType="Python SDK" />
1011 <orderEntry type="sourceFolder" forTests="false" />
--- /dev/null
+++ b/build/lib/Data.py
@@ -0,0 +1,38 @@
1+from typing import List, Any
2+
3+import numpy
4+
5+
class Data:
    """One labelled sample: a grid of raw values (``dat``), its label,
    and the path of the source file it was parsed from.

    Getter/setter names (including the ``getLable`` [sic] typo) are kept
    unchanged because DataReader and DatasetGenerator call them.
    """

    def __init__(self):
        # Per-instance state.  The original declared these as mutable
        # CLASS attributes, so `dat` was shared between every instance —
        # a classic aliasing bug; initialising here fixes that while
        # keeping instance-level reads/writes compatible.
        # Rows of expected run-length data, e.g. (0, 0, 1, ...).
        self.dat = []
        self.label = ""
        self.__org_file = ""

    def getLable(self):
        # [sic] — name kept for backward compatibility with callers.
        return self.label

    def getData(self):
        return self.dat

    def setLabel(self, value: str):
        self.label = value

    def setData(self, value):
        self.dat = value

    def set_org_data(self, value):
        self.__org_file = value

    def get_array_data(self):
        """Return ``dat`` converted to a 2-D float32 numpy array."""
        result: List[Any] = []
        for row in self.dat:
            result.append(numpy.array(row, dtype=numpy.float32))
        return numpy.array(result)

    def get_org_file(self):
        return self.__org_file
--- /dev/null
+++ b/build/lib/DataReader.py
@@ -0,0 +1,85 @@
1+import os
2+import Data
3+import yaml
4+import numpy
5+from typing import List
6+from Exceptions import IleagalDataException
7+
# Expected number of rows (lines after the label) in one data file;
# checked by DataReader.__check_data.
data_size = 10
# Expected number of comma-separated values per row.
data_length = 50
10+
11+
class DataReader:
    """Loads sample files from the configured data directory and builds
    the training/test split for the learning pipeline."""

    # Populated from config/toolconf.yml at construction time.
    DATA_SOURCE = ""

    def __init__(self):
        # Portable path join instead of the original hard-coded Windows
        # separator ('config\\toolconf.yml').
        with open(os.path.join('config', 'toolconf.yml'), 'r') as config:
            # yaml.load without an explicit Loader is deprecated and
            # unsafe on untrusted input; safe_load suffices for a plain
            # config mapping.
            confdata = yaml.safe_load(config)
        self.DATA_SOURCE = confdata['dataPath']

    def get_learning_data(self):
        """
        :return: training data, training labels, test data, test labels
        """
        target_data = self.parse_data_files()
        # NOTE: the original wrote List(...) — an (unevaluated) call, not
        # a subscripted generic; fixed to proper annotation syntax.
        datasets: List[numpy.ndarray] = []
        labels: List[str] = []

        for cdata in target_data:
            datasets.append(cdata.get_array_data())
            labels.append(cdata.getLable())

        # First (rounded-up) half is training, the rest is test.
        center = len(datasets) // 2 + len(datasets) % 2

        return (numpy.array(datasets[:center]), numpy.array(labels[:center]),
                numpy.array(datasets[center:]), numpy.array(labels[center:]))

    def __check_data(self, data: "Data"):
        """Validate one Data object; raise IleagalDataException on problems.

        NOTE(review): currently not called from any method in this file.
        """
        # The source file path must be recorded.
        if data.get_org_file() == "":
            raise IleagalDataException(data, "データファイルパスが設定されちません。")
        if data.getLable() == "":
            raise IleagalDataException(data, "ラベルが設定されてません。")
        if len(data.getData()) != data_size:
            raise IleagalDataException(data, "要素数(=行数)が想定と異なります。")

        for current in data.get_array_data():
            if len(current) != data_length:
                raise IleagalDataException(data, "データのサイズが想定と異なります。")
        return True

    def parse_data_files(self):
        """Parse every file under DATA_SOURCE into a Data object."""
        return [self._read(file) for file in self.__walkDataDirectory(self.DATA_SOURCE)]

    def __walkDataDirectory(self, directory):
        # Yield every file path under *directory*, recursively.
        for root, dirs, files in os.walk(directory):
            for file in files:
                yield os.path.join(root, file)

    def _read(self, file: str):
        """Parse one sample file: the first line is the label, the
        remaining lines are comma-separated value rows.

        Returns an empty Data object when *file* does not exist.
        """
        result = Data.Data()
        if os.path.isfile(file):
            # Context manager ensures the handle is closed even on a
            # parse error (the original used a bare open/close pair).
            with open(file) as datafile:
                orgdata = datafile.readlines()

            result.setLabel(int(orgdata[0].strip()))
            result.setData([line.replace("\n", "").split(',') for line in orgdata[1:]])
            result.set_org_data(file)

        return result
--- /dev/null
+++ b/build/lib/DatasetGenerator.py
@@ -0,0 +1,32 @@
1+from chainer.datasets import tuple_dataset
2+from Data import Data
3+
class DatasetGenerator:
    """Flattens Data objects into parallel (graph, label) training lists."""

    # A valid sample flattens to exactly this many values
    # (data_size 10 rows x data_length 50 columns — was an inline magic
    # number in the original; see the TODO it carried).
    EXPECTED_SIZE = 500

    def generateDataset(self, dataList):
        """Return (graphdata, labeldata) built from *dataList*, skipping
        unlabeled or wrongly-sized samples."""
        graphdata = []  # type: List[List[float]]
        labeldata = []  # type: List[int]

        for data in dataList:
            flat = self.createGraphData(data.getData())
            if data.getLable() != '' and len(flat) == self.EXPECTED_SIZE:
                labeldata.append(int(data.getLable()))
                graphdata.append(flat)

        return graphdata, labeldata

    def createGraphData(self, targetData):
        """Flatten rows of numeric strings into one list of floats."""
        result = []  # type: List[float]

        for strline in targetData:
            floatLine = []  # type: List[float]

            for block in strline:
                floatLine.append(float(block))
            # TODO: apply normalisation to floatLine before extending.
            result.extend(floatLine)

        return result
--- /dev/null
+++ b/build/lib/Exceptions.py
@@ -0,0 +1,9 @@
1+from Data import Data
2+
3+
class IleagalDataException(Exception):
    """Raised when a parsed Data object fails validation.

    ("Ileagal" [sic] — the misspelled name is kept because callers in
    DataReader reference it.)
    """

    def __init__(self, data: "Data", msg: str):
        message = data.get_org_file() + "から生成されるデータに問題があります。\n" + msg
        # BUG FIX: the original never called super().__init__, so
        # str(exception) and e.args were empty; pass the message through.
        super().__init__(message)
        self.__msg = message
        self.__data = data
9+
--- /dev/null
+++ b/build/lib/Learning.py
@@ -0,0 +1,139 @@
1+import numpy
2+
3+from DataReader import DataReader
4+
5+from keras.utils.np_utils import to_categorical
6+from keras.models import Model
7+from keras.models import model_from_yaml
8+from keras.layers import Dense, Input, Dropout
9+
10+
class ModelGenerator:
    """
    Generates the learning model. Mainly performs the following steps:
    1. Build test data and training data from the learning data placed
       under the configured directory.
    2. Generate the model weights using the training data.
    3. Write the model definition and weight data to external files.
    4. Reconstruct the learning model from the externally stored model
       definition and weight data.

    NOTE(review): this class is an empty marker — the actual work is
    done by the module-level functions below.
    """
20+
21+
def generate_data():
    """Load the dataset and return (x_train, y_train, x_test, y_test)
    ready for the 500-input network."""
    reader = DataReader()
    x_tr, y_tr, x_te, y_te = reader.get_learning_data()

    # Flatten each 10x50 sample into a 500-vector of floats.
    x_tr = x_tr.reshape(len(x_tr), 500).astype(float)
    x_te = x_te.reshape((len(x_te)), 500).astype(float)

    # One-hot encode the labels over the 11 classes.
    y_tr = to_categorical(y_tr.astype('int32'), 11)
    y_te = to_categorical(y_te.astype('int32'), 11)
    return x_tr, y_tr, x_te, y_te
33+
34+
def create_neural_network():
    """Build and compile the 500-200-100-11 MLP classifier."""
    inputs = Input(shape=(500,))

    hidden = Dense(200, activation='relu')(inputs)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(100, activation='relu')(hidden)
    hidden = Dropout(0.5)(hidden)
    outputs = Dense(11, activation='softmax')(hidden)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )
    return model
49+
50+
def lean(model):
    """Train *model* on the generated data and return it.

    (Function name "lean" [sic, presumably "learn"] is kept because
    other functions in this file call it.)
    """
    # Build the data split.
    x_train, y_train, x_test, y_test = generate_data()

    # Train and evaluate.  The original bound the fit history to an
    # unused variable; the binding is dropped.
    model.fit(x_train, y_train, batch_size=100, epochs=20, verbose=1,
              validation_data=(x_test, y_test))

    return model
61+
62+
def persist_leaning_modle(model, model_path='data.yml', param_path='param.hdf5'):
    """Persist the trained model: definition as YAML, weights as HDF5."""
    # Save the model definition.
    serialized = model.to_yaml()
    with open(model_path, mode='w') as out:
        out.write(serialized)

    # Save the parameters (weights) to a separate file.
    model.save_weights(param_path)
71+
72+
def generate_leaning_model():
    """Build, train, and persist a fresh model."""
    trained = lean(create_neural_network())  # type Model
    # Save the model definition and weights.
    persist_leaning_modle(trained)
78+
79+
def regenerate_leaning_model(model_path='data.yml', param_path='param.hdf5'):
    """Rebuild a model from the saved YAML definition and HDF5 weights.

    :param model_path: path of the YAML model definition
    :param param_path: path of the HDF5 weight file
    :return: the reconstructed, compiled model
    """
    with open(model_path) as model_yaml_file:
        model_yaml = model_yaml_file.read()

    model = model_from_yaml(model_yaml)
    model.load_weights(filepath=param_path)

    # BUG FIX: the original wrote `model.compile` without parentheses —
    # a no-op attribute access.  Compile with the same settings used in
    # create_neural_network so the reloaded model is usable.
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
92+
93+
def main():
    """Demo entry point: reload the persisted model and print the class
    probabilities predicted for a hard-coded sample pattern."""
    model = regenerate_leaning_model()

    # Label 4 data
    # Example classification run follows.
    # NOTE(review): `sample` is kept as reference input but is unused —
    # only `sample2` is actually classified below.
    sample = numpy.array(
        [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
        , dtype=numpy.float32)

    sample2 = numpy.array([
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
]
        , dtype=numpy.float32)

    # Flatten to the (1, 500) shape the network expects and classify.
    sample2 = sample2.reshape(1, 500).astype(float)
    result = model.predict(sample2, batch_size=1).astype(float)
    numpy.set_printoptions(precision=3, suppress=True)
    print(result)
    # NOTE: a dead triple-quoted duplicate of the four lines above was
    # removed here (it was a no-op string expression in the original).
137+
138+
139+main()
--- /dev/null
+++ b/build/lib/Predictor.py
@@ -0,0 +1,31 @@
1+from keras.models import Model
2+from Learning import ModelGenerator
3+import Learning
4+import numpy
5+
6+
def predict(target: str):
    """
    Classify the given pattern (50 x 10, flattened to 500 values) and
    return the predicted class probabilities.

    :param target: comma-separated string of exactly 500 numeric values
    :return: numpy array of class probabilities
    :raises ValueError: if the input does not contain exactly 500 values
    """
    # Split into individual values.
    values = target.split(",")

    # Validate the size up front.  The original only printed
    # "Unsuported Process. yet" and fell through to a reshape failure;
    # raising makes the contract explicit.
    if len(values) != 500:
        raise ValueError("expected 500 comma-separated values, got %d" % len(values))

    # Convert to the (1, 500) float array the model expects.
    array = numpy.array(values, dtype=numpy.float32)
    array = array.reshape(1, 500).astype(float)
    model = Learning.regenerate_leaning_model()  # type: Model
    result = model.predict(array, batch_size=1).astype(float)

    # Debug-friendly result formatting.
    numpy.set_printoptions(precision=3, suppress=True)
    return result
30+
31+
--- /dev/null
+++ b/build/lib/foo.py
@@ -0,0 +1,32 @@
# Exploratory scratch script: loads MNIST and sketches an MLP.
# NOTE(review): the model build/fit at the bottom is commented out, so
# running this only downloads data and prints shape diagnostics.
from keras.datasets import mnist
from keras.layers import Dense,Input,Dropout
import keras


# Download (on first run) and load the MNIST digit dataset.
(x_train, y_train), (x_test, y_test) = mnist.load_data()


# Debug prints inspecting the nesting and lengths of the raw arrays.
print("type:"+str(type(x_train))+"ren"+str(len(x_train)))
print("type:"+str(type(x_train[0]))+"ren:"+str(len(x_train[0])))
print("type:"+str(type(x_train[0][0]))+"ren:"+str(len(x_train[0][0])))
print("type:"+str(type(x_train[0][0][0])))
# Flatten 28x28 images to 784-vectors and scale pixels to [0, 1].
x_train = x_train.reshape(60000, 784).astype('float32') /255
x_test = x_test.reshape(10000, 784).astype('float32') /255

# One-hot encode the ten digit classes.
y_train = keras.utils.np_utils.to_categorical(y_train.astype('int32'),10)
y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'),10)

inputs = Input(shape=(784,))

# 784-512-512-10 MLP with dropout between layers.
nw = Dense(512, activation='relu')(inputs)
nw = Dropout(.5)(nw)
nw = Dense(512, activation='relu')(nw)
nw = Dropout(.5)(nw)
predictions = Dense(10, activation='softmax')(nw)

# Model definition (specify inputs and layers) — left disabled:
#model = Model(inputs=inputs, outputs=predictions)

#model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])

#history = model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=1, validation_data=(x_test, y_test))
Binary files /dev/null and b/dist/ultreron-0.0.1-py3-none-any.whl differ
Binary files /dev/null and b/dist/ultreron-0.0.1-py3.6.egg differ
Binary files /dev/null and b/dist/ultreron-0.0.1.tar.gz differ
Show on old repository browser