ae_python_chainer.py
@atarabi · Created January 9, 2016 16:29
import numpy as np
import chainer
from chainer import Chain, optimizers, serializers, Variable, cuda
import chainer.functions as F
import chainer.links as L


class Model(Chain):
    """A small 4-layer MLP mapping a 3-vector to a 3-vector."""

    def __init__(self, n_units=10):
        super(Model, self).__init__(
            l1=L.Linear(3, n_units),
            l2=L.Linear(n_units, n_units),
            l3=L.Linear(n_units, n_units),
            l4=L.Linear(n_units, 3),
        )

    def __call__(self, x):
        h = F.relu(self.l1(x))
        h = F.relu(self.l2(h))
        h = F.relu(self.l3(h))
        return self.l4(h)  # no activation on the output layer


def train(model, optimizer, x_data, y_data):
    """Run one epoch of minibatch training and return the mean loss."""
    batch_size = 100
    N = x_data.shape[0]
    indices = np.random.permutation(N)  # reshuffle the data each epoch
    sum_loss = 0.0
    for i in range(0, N, batch_size):
        xs = Variable(cuda.to_gpu(x_data[indices[i:i + batch_size]]))
        ts = Variable(cuda.to_gpu(y_data[indices[i:i + batch_size]]))
        ys = model(xs)
        model.zerograds()  # clear accumulated gradients before backprop
        loss = F.mean_squared_error(ys, ts)
        sum_loss += loss.data * len(ts.data)  # weight the batch loss by its size
        loss.backward()
        optimizer.update()
    return sum_loss / N
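
The gist ships only the model and the training loop. Below is a minimal driver sketch, not part of the original, assuming the Chainer v1-era API used above (optimizers.Adam, Link.to_gpu) and a CUDA-capable GPU, since train() calls cuda.to_gpu unconditionally. The dataset size, epoch count, and identity-mapping target are illustrative assumptions.

    # Hypothetical driver: wires Model and train() together on toy data.
    if __name__ == '__main__':
        model = Model(n_units=10)
        model.to_gpu()  # move parameters to the GPU to match the inputs
        optimizer = optimizers.Adam()
        optimizer.setup(model)

        # Toy regression task (assumption): map random 3-vectors onto themselves.
        N = 1000
        x_data = np.random.rand(N, 3).astype(np.float32)
        y_data = x_data.copy()

        for epoch in range(20):
            mean_loss = train(model, optimizer, x_data, y_data)
            print('epoch {}: loss {}'.format(epoch + 1, mean_loss))

        # The unused serializers import suggests the gist author saved or
        # loaded the trained weights elsewhere.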