Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 9320042

Browse files
author
Guled
committed
Swift lint, removed unnecessary code
1 parent 22aae90, commit 9320042

File tree

12 files changed

+245
-263
lines changed

12 files changed

+245
-263
lines changed

‎Example/MLKit/GameScene.swift‎

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
import SpriteKit
1010
import MachineLearningKit
11-
11+
/*
1212
class GameScene: SKScene, SKPhysicsContactDelegate {
1313

1414
// ADDITIONS
@@ -43,7 +43,7 @@ class GameScene: SKScene, SKPhysicsContactDelegate {
4343
/// The best birds from the previous generation
4444
var lastBestGen: [FlappyGenome] = []
4545

46-
/// SKLabel
46+
/// SKLabel
4747
var generationLabel: SKLabelNode!
4848
var fitnessLabel:SKLabelNode!
4949

@@ -537,3 +537,4 @@ class GameScene: SKScene, SKPhysicsContactDelegate {
537537
}
538538
}
539539
}
540+
*/

‎Example/MLKit/GameViewController.swift‎

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ import UIKit
1010
import SpriteKit
1111
import MachineLearningKit
1212
import Upsurge
13-
13+
/*
1414
extension SKNode {
1515
class func unarchiveFromFile(_ file: String) -> SKNode? {
1616

@@ -117,3 +117,4 @@ class GameViewController: UIViewController {
117117
}
118118

119119
}
120+
*/

‎Example/MLKit/GeneticOperations.swift‎

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@ import MachineLearningKit
1111
import Upsurge
1212

1313
/// The GeneticOperations class manages encoding genes into weights for the neural network and decoding neural network weights into genes. These methods are not provided in the framework itself, rather it was for the game example.
14+
/*
1415
final class GeneticOperations {
15-
1616
/**
1717
The encode method converts a NueralNet object to an array of floats by taking the weights of each layer and placing them into an array.
1818

@@ -110,3 +110,4 @@ final class GeneticOperations {
110110
return brain
111111
}
112112
}
113+
*/

‎Example/Pods/Pods.xcodeproj/project.pbxproj‎

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

‎Example/Tests/NeuralNetworkSpec.swift‎

Lines changed: 1 addition & 165 deletions
Original file line numberDiff line numberDiff line change
@@ -16,174 +16,10 @@ class NeuralNetworkSpec: QuickSpec {
1616

1717
overridefunc spec(){
1818

19-
it("Should be able to run a simple XOR example using a single layer Perceptron architecture."){
19+
it(""){
2020

21-
print("\n")
22-
print("XOR perceptron TEST\n")
23-
24-
letnet=NeuralNet(numberOfInputNeurons:2, hiddenLayers:[], numberOfOutputNeurons:1)
25-
net.printNet()
26-
27-
net.trainingSet=Matrix<Float>(rows:4, columns:3, elements:[1.0,0.0,0.0,1.0,0.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0])
28-
29-
net.targetOutputSet=ValueArray<Float>([0.0,0.0,0.0,1.0])
30-
31-
net.maxEpochs=10
32-
33-
net.targetError=0.002
34-
35-
net.learningRate=1.0
36-
37-
net.trainingType=TrainingType.perceptron
38-
39-
net.activationFuncType=ActivationFunctionType.step
40-
41-
lettrainedNet=try! net.trainNet()
42-
43-
trainedNet.printNet()
44-
45-
trainedNet.printTrainedNet(network: trainedNet)
46-
47-
varactualOutput:[Float]=[]
48-
49-
forvalin trainedNet.targetOutputSet{
50-
actualOutput.append(val)
51-
}
52-
53-
expect(trainedNet.estimatedOutputAsArray).to(equal(actualOutput))
54-
55-
}
56-
57-
it("Should be able to run a simple example using a single layer Adaline architecture."){
58-
59-
letnet=NeuralNet(numberOfInputNeurons:3, hiddenLayers:[], numberOfOutputNeurons:1)
60-
61-
net.printNet()
62-
63-
net.trainingSet=Matrix<Float>(rows:7, columns:4, elements:[1.0,0.98,0.94,0.95,1.0,0.60,0.60,0.85,1.0,0.35,0.15,0.15,1.0,0.25,0.30,0.98,1.0,0.75,0.85,0.91,1.0,0.43,0.57,0.87,1.0,0.05,0.06,0.01])
64-
65-
net.targetOutputSet=ValueArray<Float>([0.80,0.59,0.23,0.45,0.74,0.63,0.10])
66-
67-
net.maxEpochs=10
68-
69-
net.targetError=0.0001
70-
71-
net.learningRate=0.5
72-
73-
net.trainingType=TrainingType.adaline
74-
75-
net.activationFuncType=ActivationFunctionType.linear
76-
77-
lettrainedNet=try! net.trainNet()
78-
79-
trainedNet.printNet()
80-
81-
trainedNet.printTrainedNet(network: trainedNet)
82-
83-
varactualOutput:[Double]=[]
84-
85-
forvalin trainedNet.targetOutputSet{
86-
actualOutput.append(Double(val))
87-
88-
}
89-
90-
varestimatedOutputAsDouble:[Double]=[]
91-
92-
forvalin trainedNet.estimatedOutputAsArray{
93-
estimatedOutputAsDouble.append(Double(val))
94-
}
95-
96-
expect(estimatedOutputAsDouble).to(beCloseTo(actualOutputas[Double], within:1.0))
97-
}
98-
99-
it("Should be able to run a simple example using a BackPropagation architecture."){
100-
101-
letnet=NeuralNet.init(numberOfInputNeurons:2, hiddenLayers:[3], numberOfOutputNeurons:2)
102-
103-
print("---------------------backpropagation INIT---------------------")
104-
105-
net.printNet()
106-
107-
net.trainingSet=Matrix<Float>(rows:10, columns:3, elements:[1.0,1.0,0.73,
108-
1.0,1.0,0.81,
109-
1.0,1.0,0.86,
110-
1.0,1.0,0.95,
111-
1.0,0.0,0.45,
112-
1.0,1.0,0.70,
113-
1.0,0.0,0.51,
114-
1.0,1.0,0.89,
115-
1.0,1.0,0.79,
116-
1.0,0.0,0.54])
117-
118-
net.targetOutputMatrix=Matrix<Float>(rows:10, columns:2, elements:[1.0,0.0,
119-
1.0,0.0,
120-
1.0,0.0,
121-
1.0,0.0,
122-
1.0,0.0,
123-
0.0,1.0,
124-
0.0,1.0,
125-
0.0,1.0,
126-
0.0,1.0,
127-
0.0,1.0])
128-
129-
net.maxEpochs=1000
130-
131-
net.targetError=0.002
132-
133-
net.learningRate=0.1
134-
135-
net.trainingType=.backpropagation
136-
137-
net.activationFuncType=ActivationFunctionType.siglog
138-
139-
net.activationFuncTypeOfOutputLayer=.linear
140-
141-
lettrainedNet=try! net.trainNet()
142-
143-
trainedNet.printNet()
144-
145-
trainedNet.printTrainedNet(network: trainedNet)
146-
147-
}
148-
149-
it("Should be able to run a simple XOR example using a single layer Perceptron architecture with multiple hidden layers."){
150-
151-
print("\n")
152-
print("XOR perceptron TEST multiple hidden layers\n")
153-
154-
letnet=NeuralNet(numberOfInputNeurons:2, hiddenLayers:[2,2], numberOfOutputNeurons:1)
155-
net.printNet()
156-
157-
net.trainingSet=Matrix<Float>(rows:4, columns:3, elements:[1.0,0.0,0.0,1.0,0.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0])
158-
159-
net.targetOutputSet=ValueArray<Float>([0.0,0.0,0.0,1.0])
160-
161-
net.maxEpochs=10
162-
163-
net.targetError=0.002
164-
165-
net.learningRate=1.0
166-
167-
net.trainingType=TrainingType.perceptron
168-
169-
net.activationFuncType=ActivationFunctionType.step
170-
171-
lettrainedNet=try! net.trainNet()
172-
173-
trainedNet.printNet()
174-
175-
trainedNet.printTrainedNet(network: trainedNet)
176-
177-
varactualOutput:[Float]=[]
178-
179-
forvalin trainedNet.targetOutputSet{
180-
actualOutput.append(val)
181-
}
182-
183-
expect(trainedNet.estimatedOutputAsArray).to(equal(actualOutput))
18421

18522
}
18623

18724
}
188-
18925
}

‎MLKit-PlayGround.playground/Contents.swift‎

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,8 @@
22

33
import UIKit
44
import Upsurge
5-
6-
5+
import MachineLearningKit
6+
/* Already implemented in MachineLearningKit
77
extension Collection {
88
/// Return a copy of `self` with its elements shuffled
99
func shuffle() -> [Iterator.Element] {
@@ -161,7 +161,7 @@ class Layer {
161161

162162
// TODO: Make private method
163163
func generateRandomBiases() -> Matrix<Float> {
164-
164+
165165
var biasValues: [Float] = []
166166

167167
for i in 0..<layerSize!.columns {
@@ -215,7 +215,7 @@ class Layer {
215215

216216

217217

218-
// Feed Forward Implementation
218+
// Feed Forward Implementation
219219
class NeuralNetwork {
220220

221221
public var layers: [Layer] = []
@@ -289,7 +289,7 @@ class NeuralNetwork {
289289
// Output Layer Delta
290290
var delta = layers.last?.produceOuputError(cost: outputError)
291291

292-
// Set the change in weights and bias for the last layer
292+
// Set the change in weights and bias for the last layer
293293
self.layers.last?.Δb = delta
294294

295295
var activationValuesforTheSecondToLastLayer = layers[layers.count-2].activationValues
@@ -354,4 +354,4 @@ print(nn.feedforward(input: input3))
354354
print(nn.feedforward(input: input4))
355355
*/
356356

357-
357+
*/

‎MLKit-PlayGround.playground/timeline.xctimeline‎

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,12 @@
33
version ="3.0">
44
<TimelineItems>
55
<LoggerValueHistoryTimelineItem
6-
documentLocation ="#CharacterRangeLen=0&amp;CharacterRangeLoc=9641&amp;EndingColumnNumber=26&amp;EndingLineNumber=242&amp;StartingColumnNumber=1&amp;StartingLineNumber=241&amp;Timestamp=513204866.518704"
6+
documentLocation ="#CharacterRangeLen=0&amp;CharacterRangeLoc=9712&amp;EndingColumnNumber=26&amp;EndingLineNumber=242&amp;StartingColumnNumber=1&amp;StartingLineNumber=241&amp;Timestamp=513273241.954729"
77
selectedRepresentationIndex ="0"
88
shouldTrackSuperviewWidth ="NO">
99
</LoggerValueHistoryTimelineItem>
1010
<LoggerValueHistoryTimelineItem
11-
documentLocation ="#CharacterRangeLen=0&amp;CharacterRangeLoc=2798&amp;EndingColumnNumber=26&amp;EndingLineNumber=118&amp;StartingColumnNumber=9&amp;StartingLineNumber=118&amp;Timestamp=513202826.105433"
11+
documentLocation ="#CharacterRangeLen=0&amp;CharacterRangeLoc=2867&amp;EndingColumnNumber=26&amp;EndingLineNumber=118&amp;StartingColumnNumber=9&amp;StartingLineNumber=118&amp;Timestamp=513273241.954952"
1212
selectedRepresentationIndex ="0"
1313
shouldTrackSuperviewWidth ="NO">
1414
</LoggerValueHistoryTimelineItem>

0 commit comments

Comments (0)

[8]ページ先頭

©2009-2025 Movatter.jp