/* Plain OpenCL implementation of my gradient descent algorithm.
 * This one allows many gradient descents on the same function to happen at
 * once: each work-item advances one descent by one step per kernel launch. */

// Python reference implementation this kernel ports:
// def step(self, gamma=0.1, beta=0.9):
//     x0 = self.x0
//     fx0 = self.eq.evalf(subs={"x": x0[0], "y": x0[1]})
//     print("x0: ", x0, "f(x0):", fx0)
//     grad = np.array([df.evalf(subs={"x": x0[0], "y": x0[1]}) for df in self.deq])
//     print("grad: ", grad)
//     # implementation of momentum
//     if self.pgrad is None:
//         self.pgrad = grad
//     grad = beta * self.pgrad + (1 - beta) * grad
//     self.pgrad = grad
//     x1 = x0 - gamma * grad
//     for i in range(len(x1)):
//         if x1[i] < self.ranges[i][0]:
//             x1[i] = self.ranges[i][0]
//         elif x1[i] > self.ranges[i][1]:
//             x1[i] = self.ranges[i][1]
//     self.x0 = x1
//     return self.eq.evalf(subs={"x": x1[0], "y": x1[1]})

// A point (or a gradient) is NVAR floats; NVAR is expected as a
// compile-time definition, e.g. -DNVAR=2.
typedef float vec[NVAR];

// Objective and gradient; definitions are supplied per target function.
void eval_deq(vec grad, __global vec x0);
float eval_eq(__global const vec x0);

// One momentum gradient-descent step per work-item. The original kernel was
// truncated mid-loop; the loop body, the `ranges` buffer ([lo, hi] pair per
// variable) and the `fx1` output are reconstructed from the Python reference
// above. `pgrad` must be initialised by the host (e.g. to zeros). Note that
// the name `step` shadows an OpenCL built-in and may need renaming on strict
// compilers.
__kernel void step(float gamma, float beta, __global vec *x0,
                   __global vec *pgrad, __global const float *ranges,
                   __global float *fx1){
    int gid = get_global_id(0);
    vec grad;
    eval_deq(grad, x0[gid]);
    for (int i = 0; i < NVAR; i++){
        // momentum: blend the running average with the new gradient
        grad[i] = beta * pgrad[gid][i] + (1.0f - beta) * grad[i];
        pgrad[gid][i] = grad[i];
        // descend, then clamp the coordinate back into its allowed range
        x0[gid][i] -= gamma * grad[i];
        x0[gid][i] = clamp(x0[gid][i], ranges[2 * i], ranges[2 * i + 1]);
    }
    // mirror the Python step's return value: f at the updated point
    fx1[gid] = eval_eq(x0[gid]);
}
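
/* A minimal sketch (not from the original source) of what the eval_eq /
 * eval_deq definitions could look like, hand-written here for
 * f(x, y) = x^2 + y^2 with NVAR == 2. In the real pipeline these would
 * presumably be emitted from the sympy expressions self.eq / self.deq, so
 * this block would be replaced by generated code. */
float eval_eq(__global const vec x0){
    return x0[0] * x0[0] + x0[1] * x0[1];
}

void eval_deq(vec grad, __global vec x0){
    grad[0] = 2.0f * x0[0];  // df/dx
    grad[1] = 2.0f * x0[1];  // df/dy
}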
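
/* Hypothetical host-side sketch (PyOpenCL, not part of the original file)
 * showing the point of the design: one launch advances n independent
 * descents of the same function by one step. The file name "gd.cl" and the
 * ranges/fx1 arguments follow the assumptions made above. */
// import numpy as np
// import pyopencl as cl
//
// ctx = cl.create_some_context()
// queue = cl.CommandQueue(ctx)
// prog = cl.Program(ctx, open("gd.cl").read()).build(options=["-DNVAR=2"])
//
// n = 1024  # number of simultaneous descents
// x0 = np.random.uniform(-1, 1, size=(n, 2)).astype(np.float32)
// pgrad = np.zeros_like(x0)                       # momentum state, zeroed
// ranges = np.array([-1, 1, -1, 1], np.float32)   # [lo, hi] per variable
// fx1 = np.empty(n, np.float32)
//
// mf = cl.mem_flags
// x0_g = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=x0)
// pgrad_g = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=pgrad)
// ranges_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=ranges)
// fx1_g = cl.Buffer(ctx, mf.WRITE_ONLY, fx1.nbytes)
//
// for _ in range(100):  # 100 steps of every descent
//     prog.step(queue, (n,), None, np.float32(0.1), np.float32(0.9),
//               x0_g, pgrad_g, ranges_g, fx1_g)
// cl.enqueue_copy(queue, fx1, fx1_g)
// print("best f:", fx1.min())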