Hello everyone,
Could you please help me with the error shown below, which appears when I run the following code? My Keras version is '3.3.3.dev2024060803'.
import tensorflow as tf
import numpy as np
import time
from tensorflow.keras.layers import Dense
import tensorflow_probability as tfp
tf.random.set_seed(42)
class model(tf.keras.Model):
    def __init__(self, layers, train_op, num_epoch, print_epoch):
        super(model, self).__init__()
        self.model_layers = layers
        self.train_op = train_op
        self.num_epoch = num_epoch
        self.print_epoch = print_epoch
        self.adam_loss_hist = []
    def call(self, r):
        return self.r_val(r)

    # Running the model
    def r_val(self, r):
        r = 2.0*(r - self.bounds["lb"])/(self.bounds["ub"] - self.bounds["lb"]) - 1.0
        for l in self.model_layers:
            r = l(r)
        return r
    def Stress(self, r):
        stress = 448.4*tf.exp(0.1231*(self.r_val(r))) - 0.409*tf.exp(-6.933*(self.r_val(r)))
        return stress

    # Return the first derivative
    def Strain(self, r):
        strain = 0.009312499803*self.r_val(r) + 0.009312500112
        return strain
    # Midpoint-rule approximation of the strain energy over [a, r] with N subintervals
    def Strain_Energy(self, r, N, a):
        def f(r):
            Energy = 0.03*0.015*1*(self.Stress(self.r_val(r)))*(self.Strain(self.r_val(r)))
            return Energy
        value = tf.constant(0, dtype=tf.float64)
        # Use a separate loop index i so it does not shadow the subinterval count N
        for i in range(1, N+1):
            value += f(a + (i - 1/2)*((r - a)/N))
        value2 = ((r - a)/N)*value
        return value2
    def RHS(self, r):
        Ext_En = ((3*0.03*0.015)/(4*184128.9683))*(self.Stress(self.r_val(r)))**2
        return Ext_En
    # Custom loss function
    def get_loss(self, r):
        LHS_Val = self.Strain_Energy(r, 2000, -1)
        RHS_Val = self.RHS(r)
        int_loss = tf.reduce_mean(tf.math.square(LHS_Val - RHS_Val))
        return int_loss
    # Get gradients
    def get_grad(self, r):
        with tf.GradientTape() as tape:
            tape.watch(self.trainable_variables)
            L = self.get_loss(r)
        g = tape.gradient(L, self.trainable_variables)
        return L, g
    # Perform gradient descent
    def network_learn(self, r):
        self.bounds = {"lb": tf.math.reduce_min(r),
                       "ub": tf.math.reduce_max(r)}
        for i in range(self.num_epoch):
            L, g = self.get_grad(r)
            self.train_op.apply_gradients(zip(g, self.trainable_variables))
            self.adam_loss_hist.append(L)
            if i % self.print_epoch == 0:
                print("Epoch {} loss: {}".format(i, L))
rmin = -1
rmax = -0.7
numPts = 25
data_type = "float64"
a = tf.constant(-1, dtype=tf.float64)
rint = np.linspace(rmin, rmax, numPts).astype(data_type)
rint = np.array(rint)[np.newaxis].T
# Define the model
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(25, "tanh")
l2 = tf.keras.layers.Dense(25, "tanh")
l3 = tf.keras.layers.Dense(1, None)
train_op = tf.keras.optimizers.Adam()
num_epoch = 200
print_epoch = 10
pred_model = model([l1, l2, l3], train_op, num_epoch, print_epoch)
# Convert the training data to tensors
rint_tf = tf.convert_to_tensor(rint)
# Training
print("Training (ADAM)...") t0 = time.time() pred_model.network_learn(rint_tf) t1 = time.time() print("Time taken (ADAM)", t1-t0, "seconds")
The following error appears:
ValueError                                Traceback (most recent call last)
Cell In[46], line 4
      2 print("Training (ADAM)...")
      3 t0 = time.time()
----> 4 pred_model.network_learn(rint_tf)
      5 t1 = time.time()
      6 print("Time taken (ADAM)", t1-t0, "seconds")

Cell In[42], line 83, in model.network_learn(self, r)
     80 self.bounds = {"lb" : tf.math.reduce_min(r),
     81                "ub" : tf.math.reduce_max(r)}
     82 for i in range(self.num_epoch):
---> 83     L, g = self.get_grad(r)
     84     self.train_op.apply_gradients(zip(g, self.trainable_variables))
     85     self.adam_loss_hist.append(L)

Cell In[42], line 73, in model.get_grad(self, r)
     71 def get_grad(self, r):
     72     with tf.GradientTape() as tape:
---> 73         tape.watch(self.trainable_variables)
     74         L = self.get_loss(r)
     75     g = tape.gradient(L, self.trainable_variables)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\backprop.py:873, in GradientTape.watch(self, tensor)
    864 def watch(self, tensor):
    865   """Ensures that `tensor` is being traced by this tape.
    866
    867   Args:
    (...)
    871     ValueError: if it encounters something that is not a tensor.
    872   """
--> 873   for t in _extract_tensors_and_variables(tensor):
    874     if not backprop_util.IsTrainable(t):
    875       logging.log_first_n(
    876           logging.WARN, "The dtype of the watched tensor must be "
    877           "floating (e.g. tf.float32), got %r", 5, t.dtype)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\backprop.py:700, in _extract_tensors_and_variables(tensor)
    698     yield from _extract_tensors_and_variables(components)
    699 else:
--> 700     raise ValueError(f"Passed in object {obj} of type {type(obj).__name__!r}"
    701                      f", not tf.Tensor or tf.Variable or ExtensionType.")
ValueError: Passed in object
Comment From: mehtamansi29
Hi @hgaur0007 -
This issue is related to tensorflow_probability. Could you please open a new issue in that repo?
Comment From: github-actions[bot]
This issue is stale because it has been open for 14 days with no activity. It will be closed if no further activity occurs. Thank you.
Comment From: yajuna
Code that previously ran fine suddenly raises a ValueError:
ValueError                                Traceback (most recent call last)
2 frames
/tmp/__autograph_generated_filezxcji763.py in tf__get_grad_of_loss(model, t, t_0, y_0, yt_0)
      9 retval_ = ag__.UndefinedReturnValue()
     10 with ag__.ld(tf).GradientTape(persistent=True) as tape:
---> 11     ag__.converted_call(ag__.ld(tape).watch, (ag__.ld(model).trainable_variables,), None, fscope)
     12     loss = ag__.converted_call(ag__.ld(compute_loss), (ag__.ld(model), ag__.ld(t), ag__.ld(t_0), ag__.ld(y_0), ag__.ld(yt_0)), None, fscope)
     13     g = ag__.converted_call(ag__.ld(tape).gradient, (ag__.ld(loss), ag__.ld(model).trainable_variables), None, fscope)

ValueError: in user code:

    File "<ipython-input-61-da64e963dba0>", line 7, in train_step *
        loss, grad_theta = get_grad_of_loss(model, t, t_0, y_0, yt_0)
    File "<ipython-input-59-59c4b4ca0f9a>", line 6, in get_grad_of_loss *
        tape.watch(model.trainable_variables)

    ValueError: Passed in object <KerasVariable shape=(1, 10), dtype=float32, path=sequential_13/dense_81/kernel> of type 'Variable', not tf.Tensor or tf.Variable or ExtensionType.
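For reference, a minimal sketch that reproduces the error (assuming Keras 3 with the TensorFlow backend; the model and layer size here are arbitrary):

    import tensorflow as tf
    import keras

    model = keras.Sequential([keras.layers.Dense(10)])
    model.build((None, 1))  # create the weights

    with tf.GradientTape() as tape:
        # In Keras 3, trainable_variables holds KerasVariable objects,
        # which tf.GradientTape.watch() rejects with the ValueError above.
        tape.watch(model.trainable_variables)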
Comment From: Iris2762
Did you solve this problem? I'm running into the same issue.
Comment From: sonali-kumari1
Hi @hgaur0007 -
The ValueError arises from the get_grad function in your code, specifically the tape.watch(self.trainable_variables) line. Since this issue involves GradientTape, it is related to TensorFlow. Kindly reopen this issue in the TensorFlow GitHub repository. Thanks!
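In the meantime, a minimal workaround sketch (assuming the TensorFlow backend): since GradientTape watches trainable tf.Variables automatically, the failing tape.watch call can simply be dropped from get_grad:

    def get_grad(self, r):
        with tf.GradientTape() as tape:
            # No tape.watch() needed: trainable variables are watched
            # automatically, and watch() rejects Keras 3 KerasVariable objects.
            L = self.get_loss(r)
        g = tape.gradient(L, self.trainable_variables)
        return L, g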
Comment From: github-actions[bot]
This issue is stale because it has been open for 14 days with no activity. It will be closed if no further activity occurs. Thank you.