I would like to introduce a new activation function in python 2.7 tensorflow. However, I tried by looking at some references, but mostly it is implemented in python3. I implemented it by rewriting it to python 2, but I get the same error every time.

```
class Mylayer(tf.keras.layers.Layer):
def __init__(self, output_units, *args, **kwargs):
self.output_units=output_units
super(Mylayer, self).__init__(**kwargs)
def build(self,input_shape):
super(Mylayer, self).build(input_shape)
self.kernel = self.add_variable(“kernel”,shape
[input_shape[-1].value,self.output_units])
def call(self,x):
if x<0:
newfx=0
else :
newfx=x*1.5
return tf.matmul(newfx,self.kernel)
```

This is my activation function.

```
self.inputs = tf.placeholder(shape=[1,4], dtype=tf.float32)
self.weights1 = tf.Variable(tf.truncated_normal([4,4]))
self.bias1 = tf.Variable(tf.zeros(shape=[1,4]))
self.weights2 = tf.Variable(tf.truncated_normal([4,4]))
self.bias2 = tf.Variable(tf.zeros(shape=[1, 4]))
self.weights3 = tf.Variable(tf.truncated_normal([4,1]))
self.bias3 = tf.Variable(tf.zeros([1,1]))
self.layer1 = tf.tanh(tf.matmul(self.inputs, self.weights1) + self.bias1)
self.layer2 = tf.tanh(tf.matmul(self.layer1, self.weights2) + self.bias2)
self.layer3 = Mylayer(tf.matmul(self.layer2, self.weights3) + self.bias3)
self.output_layer = self.layer3
```

This is my network.The last session etc are omitted.

```
Traceback (most recent call last):
File “capture.py”, line 832, in <module>
options = readCommand( sys.argv[1:] ) # Get game components based on input
File “capture.py”, line 683, in readCommand
redAgents = loadAgents(True, options.red, nokeyboard, redArgs)
File “capture.py”, line 755, in loadAgents
return createTeamFunc(indices[0], indices[1], isRed, **args)
File “/Users/Yuay/research/code1/Pacman-Tournament-Agent/code.py”, line 131, in createTeam
return [eval(first)(firstIndex, **kwargs), eval(second)(secondIndex, **kwargs)]
File “/Users/Yuay/research/code1/Pacman-Tournament-Agent/code.py”, line 212, in __init__
self.loss = tf.reduce_sum(tf.square((self.nextQ - self.output_layer)))
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py”, line 869, in binary_op_wrapper
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name=“y”)
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py”, line 1050, in convert_to_tensor
as_ref=False)
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py”, line 1146, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/constant_op.py”, line 229, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/constant_op.py”, line 208, in constant
value, dtype=dtype, shape=shape, verify_shape=verify_shape))
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/tensor_util.py”, line 442, in make_tensor_proto
_AssertCompatible(values, dtype)
File “/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/tensor_util.py”, line 353, in _AssertCompatible
(dtype.name, repr(mismatch), type(mismatch).__name__))
TypeError: Expected float32, got <my.Mylayer object at 0x10385dd50> of type ‘Mylayer’ instead.
```

This is error.

I understand that the returning value is coming by object rather than float, but I do not know how to fix it.
I do not have enough understanding of tensorflow and deep learning. But in keras this implementation works fine.