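# A tiny feed-forward neural network in plain Ruby: a 2-2-1 net is trained on
# the truth table for A AND NOT B, then the learned weights are dumped as a
# Graphviz digraph (one copy of the net per training case).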
train_set = [ # A AND NOT B
  [0, 0, 0],
  [0, 1, 0],
  [1, 0, 1],
  [1, 1, 0],
]
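# Each training row is [input_a, input_b, expected_output]; n is the number of
# training epochs and layers gives the neuron count per layer.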
n = 30
layers = [2, 2, 1]
def log(*s) puts "// " + s.map(&:inspect).join("\n// ") end
class Neuron
  attr_accessor :name, :parent_nodes, :weights, :value
  def initialize(name, prev_layer = [])
    self.name = name
    self.parent_nodes = []
    self.weights = []
    prev_layer.each { |n| addParentNode(n) }
    self.value = 0
  end
  def addParentNode(n)
    self.parent_nodes << n
    self.weights << 0.1 # start value
  end
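  # Forward pass for one neuron: sigmoid of the weighted sum of the parent
  # layer's values. Input neurons have no parents and just return their value.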
  def calculate
    return value if parent_nodes.empty?
    self.value = sig(parent_nodes.each_with_index.reduce(0) { |s, (n, i)| s + weights[i] * n.value })
  end
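  # Per-neuron delta-rule weight update. Unused by the script below, which
  # updates weights in NeuralNetwork#train instead; rate defaults to the
  # network's learning rate of 0.3.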
  def learn(expected, rate = 0.3)
    parent_nodes.each.with_index do |parent, i|
      self.weights[i] += rate * (expected - self.value) * parent.value
    end
  end
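  # Sigmoid activation with a fixed bias of 1 folded into the exponent:
  # 1 / (1 + e^(1 - n)), so the output crosses 0.5 at n = 1.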
  def sig(n) 1 / (1 + Math::E**(1 - n)) end
  def active?; value > 0.5 end
  def to_s; "#{name} (#{(value * 100).floor / 100.0})"; end
end
class NeuralNetwork
  attr_accessor :layers, :rate
  def initialize(*args)
    self.rate = 0.3
    self.layers = []
    args.each.with_index { |s, i| addLayer("layer#{i}", s) }
  end
  def addLayer(name, size)
    self.layers << (1..size).map { |i| Neuron.new "#{name}_#{i}", (layers.last || []) }
  end
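  # setInputs copies a test vector into the input layer's values; classify
  # then runs a full forward pass and returns the output layer.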
  def setInputs(input) input.each_with_index { |v, i| layers.first[i].value = v } end
  def classify(test)
    setInputs(test)
    layers.each { |l| l.each { |n| n.calculate } }
    layers.last
  end
  def train(test, expected)
    # The delta-weight (the change a weight needs in order to learn) is
    # rate * (expected - observed) * derivative_of_activation_function * parent_value.
    # The derivative of our sigmoid is observed * (1 - observed).
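    # Worked example with illustrative numbers: observed node.value = 0.6,
    # expected = 1, parent.value = 1, rate = 0.3:
    #   derivative    = 0.6 * (1 - 0.6)     = 0.24
    #   linear_change = 0.3 * (1 - 0.6) * 1 = 0.12
    #   change        = 0.24 * 0.12         = 0.0288 (added to the weight)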
    classify(test)
    layers.each do |layer|
      layer.each do |node|
        node.parent_nodes.each.with_index do |parent, i|
          derivative_of_activation_function = node.value * (1 - node.value)
          linear_change = rate * (expected - node.value) * parent.value
          change = derivative_of_activation_function * linear_change
          node.weights[i] += change
        end
      end
    end
  end
  def output; layers.last.map(&:value) end
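  # Emit Graphviz DOT for the whole net: one node per neuron (filled cyan when
  # active) plus one labelled edge per weight, namespaced by prefix.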
  def toDot(prefix = "")
    layers.reverse.map do |layer|
      layer.map do |node|
        "\"#{prefix}#{node}\" [style=filled fillcolor=#{node.active? ? "cyan" : "white"}]\n" +
          node.parent_nodes.map.with_index do |n, i|
            "\"#{prefix}#{n}\"->\"#{prefix}#{node}\" [label=#{(node.weights[i] * 1000).floor / 1000.0}]"
          end.join("\n")
      end.join("\n")
    end.join("\n")
  end
end
net = NeuralNetwork.new(*layers)
o = 0
n.times do
  train_set.each do |b|
    net.train(b[0..1], b.last)
    # log "#{o += 1} #{b[0..1].inspect} -> #{b.last} #{((b.last - net.output.first) * 100).floor / 100.0}"
  end
end
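# After training, the output neuron should be active only for input [1, 0];
# e.g. net.classify([1, 0]); net.output.first should drift toward 1 (how close
# depends on the epoch count n and the learning rate).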
| puts "digraph a{" | |
| %w(a b c d e f g h i j k l m n o p q r s t u v w x y z)[0...train_set.length].each.with_index do |a, i| | |
| net.classify(train_set[i][0..1]) | |
| puts net.toDot(a+'_') | |
| end | |
| puts "}" |