Thursday, April 29, 2021

How can I save a tensor to a numpy array in my customized loss function?

I want to check intermediate results while training the model.

So I need to save the tensor out from my customized loss.

Here is my code:

from util import *
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
from gen_model import read_cache_data
from numpy import random
from ml_util import *
from catboost import CatBoostRegressor
import warnings
warnings.filterwarnings('ignore')

class myloss(keras.losses.Loss):
    def __init__(self, coef, name='myloss'):
        super().__init__(name=name)
        self.coef = coef

    def call(self, y, y_pred):
        # I want to save y_pred here; below are the methods I tried, and none of them works
        #a = (tf.print(y_pred))
        #b = (tf.print(y))
        print(type(y_pred))
        #sess = tf.Session()
        sess = tf.compat.v1.Session()
        with sess.as_default():
            print(y_pred.eval())
        #print(y_pred.eval())
        #print(y_pred.numpy())
        return tf.math.reduce_mean(tf.square(y - y_pred), axis=1)

def train_mlp(train_df, valid_df, test_df, fv_cols, res_col):
    callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
    model = keras.Sequential([
        layers.Dense(50, input_shape=(len(fv_cols),), activation='relu'),
        layers.Dense(30, activation='relu'),
        layers.Dense(1)
    ])
    model.compile(optimizer=keras.optimizers.SGD(0.1), loss=myloss(0.1))
    model.summary()
    #sess.run(tf.compat.v1.global_variables_initializer())
    model.fit(train_df[fv_cols], np.reshape(train_df[res_col].tolist(), (-1, 1)),
              callbacks=[callback],
              validation_data=(valid_df[fv_cols], valid_df[res_col]),
              epochs=100, batch_size=65536)
    train_pred = model.predict(train_df[fv_cols]).flatten()
    test_pred = model.predict(test_df[fv_cols]).flatten()
    d = pd.DataFrame([train_pred, train_df[res_col], test_pred, test_df[res_col]]).T
    d.columns = ['train_pred', 'train_y', 'test_pred', 'test_y']
    print(d)
    print('MLP is R2 =', r2(y_pred=train_pred, y=train_df[res_col]))
    print('MLP os R2 =', r2(y_pred=test_pred, y=test_df[res_col]))

if __name__ == '__main__':
    df = read_cache_data('cache')
    df = df.replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna()
    fv_cols = df.columns[21:-3]
    res_col = 'res_10'
    train, test_df = df.iloc[:int(0.5*len(df))], df.iloc[int(0.5*len(df)):]
    train = train.sample(frac=1, random_state=1).reset_index(drop=True)
    train_df, valid_df = train.iloc[:int(0.7*len(train))], train.iloc[int(0.7*len(train)):]
    train_mlp(train_df, valid_df, test_df, fv_cols, res_col)
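For reference, a minimal sketch of the kind of workaround I am after (assuming TF 2.x, not verified with this exact script; the output filename is just a placeholder): if the model is compiled with run_eagerly=True, call() receives EagerTensors, so y_pred.numpy() should become usable, at the cost of slower training.

    # Sketch, assuming TF 2.x and numpy available as np (as in the script above).
    # run_eagerly=True makes Keras execute the training step eagerly, so y_pred
    # arrives as an EagerTensor and .numpy() works. Training is slower in this mode.
    class myloss(keras.losses.Loss):
        def __init__(self, coef, name='myloss'):
            super().__init__(name=name)
            self.coef = coef

        def call(self, y, y_pred):
            np.save('y_pred_batch.npy', y_pred.numpy())  # hypothetical output path
            return tf.math.reduce_mean(tf.square(y - y_pred), axis=1)

    model.compile(optimizer=keras.optimizers.SGD(0.1),
                  loss=myloss(0.1),
                  run_eagerly=True)  # force eager execution of the training step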

In call() itself I tried several methods, including eval() and session.run(), but none of them worked.
For eval(), the error is:

ValueError: Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an ex  

For session.run(), the error is:

InvalidArgumentError: You must feed a value for placeholder tensor 'sequential/dense/MatMul/ReadVariableOp/resource' with dtype reso
    [[node sequential/dense/MatMul/ReadVariableOp/resource (defined at lstm.py:58) ]]
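Another route that might sidestep the session problem entirely (again only a sketch, not verified here) is tf.print, which executes as part of the graph rather than in Python; its output goes to stderr by default and is easy to miss, so pointing it at a file may help. The file path below is just an example.

    # Sketch: tf.print runs at graph execution time, no tf.compat.v1.Session needed.
    # By default it writes to stderr; the file:// output_stream path is hypothetical.
    def call(self, y, y_pred):
        tf.print('y_pred batch:', y_pred,
                 output_stream='file:///tmp/y_pred_log.txt',
                 summarize=-1)  # summarize=-1 prints every element instead of a preview
        return tf.math.reduce_mean(tf.square(y - y_pred), axis=1)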

Can anyone help with this?

https://stackoverflow.com/questions/67327544/how-can-i-save-tensor-to-numpy-array-in-my-customized-loss-function April 30, 2021 at 10:45AM
