dtype=tf.float32, name='conv2d_83_input'), name='conv2d_83_input',
Hi, I was working on denoising autoencoders using my custom PNG dataset... but on training I get the following error:
WARNING:tensorflow:Model was constructed with shape (None, 128, 128, 1) for input KerasTensor(type_spec=TensorSpec(shape=(None, 128, 128, 1), dtype=tf.float32, name='conv2d_83_input'), name='conv2d_83_input', description="created by layer 'conv2d_83_input'"), but it was called on an input with incompatible shape (None,).
I'm not able to understand what exactly is wrong here.
do you know?
how many words do you know
See also questions close to this topic

Only download certain label tf dataset
Looking to do some fine-tuning. The dataset (found here: https://knowyourdata-tfds.withgoogle.com/#dataset=sun397&filters=kyd%2Fsun397%2Flabel:%2Fh%2Fhouse&tab=ITEM&select=kyd%2Fsun397%2Flabel&item=%2Fh%2Fhouse%2Fsun_blpzjomvikwtulrq.jpg&expanded_groups=sun397) that I'm trying to fine-tune with is pretty large, and I just want to use/download the images with label /h/house. Any tips on how I can best accomplish this? Thanks!
import tensorflow as tf import tensorflow_hub as hub import tensorflow_datasets as tfds import numpy as np import matplotlib.pyplot as plt import functools import pandas (train_ds, valid_ds), info = tfds.load("sun397", split=["train", "validation"], as_supervised=True, with_info=True, label = "/h/house") int_to_class_label = info.features['label'].int2str

TFF: How can I train any model using a server running tffruntime and a client running tffclient?
I read all the tensorflow-federated tutorials, including this one https://www.tensorflow.org/federated/gcp_setup, but I couldn't understand how to use this for training a model.
I'm doing a graduation project; to start, I need to do this POC using tensorflow-federated to train a model with one server and one client, in order to apply a cross-silo setup for recognition of organs affected by COVID in the future. If anyone can point me in a direction, I'd be very grateful.

Can't use Keras MeanIoU to train semantic segmentation model
I'm working on a binary semantic segmentation problem. I built an UNet model with MobileNetV2 backbone. Here is my model code:
def upsample(filters, size, apply_dropout=False): initializer = tf.random_normal_initializer(0., 0.02) layer = Sequential() layer.add(layers.Conv2DTranspose(filters, size, strides=2, padding='same', kernel_initializer=initializer, use_bias=False)) layer.add(layers.BatchNormalization()) if apply_dropout: layer.add(layers.Dropout(0.5)) layer.add(layers.ReLU()) return layer def UNet(image_size, num_classes): inputs = Input(shape=image_size + (3,)) base_model = applications.MobileNetV2(input_shape=image_size + (3,), include_top=False) layer_names = [ 'block_1_expand_relu', 'block_3_expand_relu', 'block_6_expand_relu', 'block_13_expand_relu', 'block_16_project', ] base_model_outputs = [base_model.get_layer(name).output for name in layer_names] down_stack = Model(inputs=base_model.input, outputs=base_model_outputs) down_stack.trainable = False up_stack = [ upsample(512, 3), upsample(256, 3), upsample(128, 3), upsample(64, 3) ] skips = down_stack(inputs) x = skips[-1] skips = reversed(skips[:-1]) for up, skip in zip(up_stack, skips): x = up(x) x = layers.Concatenate()([x, skip]) outputs = layers.Conv2DTranspose(filters=num_classes, kernel_size=3, strides=2, padding='same')(x) return Model(inputs, outputs)
To load the images and masks for training, I built an image loader inherits from
keras.Sequence
.
class ImageLoader(utils.Sequence): def __init__(self, batch_size, img_size, img_paths, mask_paths): self.batch_size = batch_size self.img_size = img_size self.img_paths = img_paths self.mask_paths = mask_paths def __len__(self): return len(self.mask_paths) // self.batch_size def __getitem__(self, idx): i = idx * self.batch_size batch_img_paths = self.img_paths[i:i + self.batch_size] batch_mask_paths = self.mask_paths[i:i + self.batch_size] x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype='float32') for j, path in enumerate(batch_img_paths): img = utils.load_img(path, target_size=self.img_size) img = utils.img_to_array(img) x[j] = img y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype='uint8') for j, path in enumerate(batch_mask_paths): img = utils.load_img(path, target_size=self.img_size, color_mode='grayscale') img = utils.img_to_array(img) # [0, 255] -> [0, 1] img //= 255 y[j] = img return x, y
In my segmentation problem, all the labels are in the range [0, 1]. However, when I try to compile and then fit the model using Adam optimizer, Sparse categorical cross entropy loss and metric
tf.keras.metrics.MeanIoU
, I encountered the following problem: Node: 'confusion_matrix/assert_non_negative_1/assert_less_equal/Assert/AssertGuard/Assert' 2 root error(s) found. (0) INVALID_ARGUMENT: assertion failed: [`predictions` contains negative values. ] [Condition x >= 0 did not hold element-wise:] [x (confusion_matrix/Cast:0) = ] [-1 -1 -1...] [[{{node confusion_matrix/assert_non_negative_1/assert_less_equal/Assert/AssertGuard/Assert}}]] [[confusion_matrix/assert_less_1/Assert/AssertGuard/pivot_f/_31/_67]] (1) INVALID_ARGUMENT: assertion failed: [`predictions` contains negative values. ] [Condition x >= 0 did not hold element-wise:] [x (confusion_matrix/Cast:0) = ] [-1 -1 -1...] [[{{node confusion_matrix/assert_non_negative_1/assert_less_equal/Assert/AssertGuard/Assert}}]]
At first, I used accuracy as a metrics for training and I didn't encounter this problem, however when I changed to MeanIoU, this problem happened. Does anyone know how to fix this problem? Thank you very much!
UPDATE: I've searched on StackOverflow and found this question about a similar error, however the fix mentioned in that link (reduce learning rate) doesn't work in my case.

how to print all parameters of a keras model
I am trying to print all the 1290 parameters in
dense_1
layer, but model.get_weights()[7]
only shows 10 parameters. How could I print all the 1290 parameters of dense_1
layer? What is the difference between model.get_weights()
and model.layer.get_weights()?
>model.get_weights()[7] array([2.8552295e-04, 4.3254648e-03, 1.8752701e-04, 2.3482188e-03, 3.4848123e-04, 7.6121779e-04, 2.7494309e-06, 1.9068648e-03, 6.0777756e-04, 1.9550985e-03], dtype=float32) >model.summary() Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 26, 26, 32) 320 conv2d_1 (Conv2D) (None, 24, 24, 64) 18496 max_pooling2d (MaxPooling2D (None, 12, 12, 64) 0 ) dropout (Dropout) (None, 12, 12, 64) 0 flatten (Flatten) (None, 9216) 0 dense (Dense) (None, 128) 1179776 dropout_1 (Dropout) (None, 128) 0 dense_1 (Dense) (None, 10) 1290 _________________________________________________________________ ================================================================= Total params: 1,199,882 Trainable params: 1,199,882 Non-trainable params: 0 _________________________________________________________________

Training plot is not appearing properly for keras model
I have data where I need to train it with X and Y. The training part is done, but when I want to plot the prediction and actual data, the plot appears with many lines instead of showing just a nonlinear regression line.
model= Sequential() model.add(Dense(7,input_dim=1, activation="tanh")) model.add(Dense(1)) model.compile(loss="mse", optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics= ["mae"]) history=model.fit(X,Y,epochs=1000) predict=model.predict(X) plt.scatter(X, Y,edgecolors='g') plt.plot(X, predict,'r') plt.legend([ 'Predictated Y' ,'Actual Y']) plt.show()
Please see the attached imageplotting image

How would I put my own dataset into this code?
I have been looking at a Tensorflow tutorial for unsupervised learning, and I'd like to put in my own dataset; the code currently uses the MNIST dataset. I know how to create my own datasets in Tensorflow, but I have trouble setting the code used here to my own. I am pretty new to Tensorflow, and the filepath to my dataset in my project is
\data\training
and\data\testval\
# Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # ScikitLearn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" # TensorFlow ≥2.0preview is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" # Common imports import numpy as np import os (X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data() X_train_full = X_train_full.astype(np.float32) / 255 X_test = X_test.astype(np.float32) / 255 X_train, X_valid = X_train_full[:5000], X_train_full[5000:] y_train, y_valid = y_train_full[:5000], y_train_full[5000:] def rounded_accuracy(y_true, y_pred): return keras.metrics.binary_accuracy(tf.round(y_true), tf.round(y_pred)) tf.random.set_seed(42) np.random.seed(42) conv_encoder = keras.models.Sequential([ keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]), keras.layers.Conv2D(16, kernel_size=3, padding="SAME", activation="selu"), keras.layers.MaxPool2D(pool_size=2), keras.layers.Conv2D(32, kernel_size=3, padding="SAME", activation="selu"), keras.layers.MaxPool2D(pool_size=2), keras.layers.Conv2D(64, kernel_size=3, padding="SAME", activation="selu"), keras.layers.MaxPool2D(pool_size=2) ]) conv_decoder = keras.models.Sequential([ keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding="VALID", activation="selu", input_shape=[3, 3, 64]), keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding="SAME", activation="selu"), keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding="SAME", activation="sigmoid"), keras.layers.Reshape([28, 28]) ]) conv_ae = keras.models.Sequential([conv_encoder, conv_decoder]) conv_ae.compile(loss="binary_crossentropy", optimizer=keras.optimizers.SGD(lr=1.0), metrics=[rounded_accuracy]) history = conv_ae.fit(X_train, X_train, epochs=5, validation_data=[X_valid, X_valid]) conv_encoder.summary() conv_decoder.summary() conv_ae.save("\models")
Do note that I got this code from another StackOverflow answer.

Comparison between object detection algorithm speeds
I am writing my final degree project and I am having trouble comparing different algorithms in the state of the art. I am comparing ResNet, MobileNet SSD, YOLOv4, VGG16, and VGG19 used in embedded devices such as the Jetson Nano or Raspberry Pi. All algorithms are used for object detection, but I am unable to find information about which one is faster or usually gets a higher accuracy. Also, I was checking whether they can be used in low-performance devices. I would be grateful if someone is able to help me.
Thanks in advance.

Every time I train my CNN on matlab, is it remembering the old weights from the previous time I trained it? Or does it reset them?
So for example, I have trained a CNN on my data using a learning rate of 0.0003 and 10 epochs, with a mini-batch size of 32. After training it, let's say I get an accuracy of 0.7. Now I want to adjust the learning rate and the mini-batch size and try training it again to see how the accuracy changes, using the trainNetwork Matlab function. My question is: is it training the model from scratch, or is it training using the weights previously calculated? I want it to start from scratch to prevent overfitting every time I adjust the hyperparameters. Sorry if this is intuitive and I'm being dumb, lol — I just want to make sure.

How to make a chatbot for discord using python
I need advice and/or resources to make a chatbot for Discord in Python. I have some knowledge of Python and the Discord API, but I know nothing about chatbots or how to implement them in Python. Can anyone point me to resources about chatbots and artificial intelligence?

Image Background Remover Using Python
I want to make an Image Background Remover using Python, but I do not know how much data and time it will take to reach the accuracy of remove.bg. I'm using the U^2-Net AI models https://github.com/xuebinqin/U-2-Net/ Some results are the same, but not every result is as good as remove.bg. In a rating I would rate my app as 2/5 and remove.bg as 4/5. Please tell me how I can achieve accuracy like remove.bg. Any help or suggestions are appreciated. Thanks

Likelihood quantification of encodings in Variational Autoencoder
Let's say I have a trained Variational Autoencoder which is trained on images of dogs, x, and the latent space z consists of 3 neurons. The latent space would then be modelled like:
class NormalDistDecoder(nn.Module): """ VAE latent space as a normal distribution """ def __init__(self, num_feat_in, latentD): super(NormalDistDecoder, self).__init__() self.mu = nn.Linear(num_feat_in, latentD) self.logvar = nn.Linear(num_feat_in, latentD) def forward(self, Xout): return torch.distributions.normal.Normal(self.mu(Xout), F.softplus(self.logvar(Xout)))
Where the mean and standard deviation can be extracted like:
self.enc_latent = NormalDistDecoder(self.encoder_output_size, LatentD = 3) ... q_z = self.enc_latent(EncoderOutput) q_z_sample = q_z.rsample() # sample from latent mean = q_z.mean # Mean of latent std = q_z.scale # std of latent
What I want:
Say that I give the model two different images it has never seen before: one of a dog and one of a bird. I want some kind of number (e.g. a likelihood) that quantifies how well the latent space of the two images belongs to the family of latent spaces the model has been trained on (images of dogs).
I would expect that the latent mean of the dog image would be a lot closer to the zero mean enforced by the KL loss in training, while the latent mean of the bird would be further out. Something like:
mean_dog = [0.1, 0.002, 0.003] mean_bird = [0.6, 1.2, 2.003]
How would I go about doing this?

using autoencoder to reduce dimension of a set of vectors
I want my autoencoder to take as input a 21,300-value vector (the values range from -7 to +7 and are of type float) and reduce its dimension to 40 values (i.e. I want to use the encoder of my autoencoder). Here is my code so far:
import numpy as np import nltk import pandas as pd from gensim.models import Word2Vec, KeyedVectors import tensorflow as tf import keras import matplotlib.pyplot as plt def prepare_data(test_data,index): if(index!=1): x_test=test_data[index] else: x_test=test_data x_test=x_test.astype(np.float32) x_test=np.array([x_test]) return x_test bigvector=np.loadtxt("../input/myData/movie_vectors.out") print(bigvector[0,0]) #test_data = np.loadtxt("../input/myData/test_set_normal.out" ,delimiter=',', usecols=range(21300), dtype=float) x_train=prepare_data(bigvector[0,],1) encoder_input = keras.Input(shape=(21300,), name='sum') encoded = keras.layers.Dense(units=12000, activation='relu')(encoder_input) encoded = keras.layers.Dense(units=7500, activation='relu')(encoded) encoded = keras.layers.Dense(units=3000, activation='relu')(encoded) encoded = keras.layers.Dense(units=1000, activation='relu')(encoded) encoded = keras.layers.Dense(units=500, activation='relu')(encoded) encoder = keras.layers.Dense(units=40, activation='relu',name='encoder')(encoded) decoded = keras.layers.Dense(units=500, activation='relu')(encoder) decoded = keras.layers.Dense(units=1000, activation='relu')(decoded) decoded = keras.layers.Dense(units=3000, activation='relu')(decoded) decoded = keras.layers.Dense(units=7500, activation='relu')(decoded) decoded = keras.layers.Dense(units=12000, activation='relu')(decoded) decoder_output = keras.layers.Dense(units=21300, activation='relu')(decoded) opt = tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-6) autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder') autoencoder.compile(opt, loss='mse') epochs=2 for epoch in range(epochs): history = autoencoder.fit( x_train, x_train, epochs=1, batch_size=1 ) autoencoder.save(f"models/AE{epoch+1}.model") #encoder_input.save(f"models/EE{epoch+1}.model") #USING THE MIDDLE LAYER encoder = keras.Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('encoder').output)
x_test=prepare_data(bigvector[1,],1) encoded_states = encoder.predict(x_test) print("movie_small_vector:\n"+str(encoded_states[0]))
This code works and it does return a 40-value vector; however, I am not sure if it's doing a good job at reducing the dimensions — the 40-value vector returned contains many zero values (30-40%). Is there any way I can verify whether it's doing a good job at reducing the dimension or not? How can I further improve my code for better performance/results?

How to use VAE encoded feature to unsupervised anomaly detection
I'm trying to implement VAE by replicating the methodology found in the paper "Unsupervised Anomaly Detection Using Variational AutoEncoder based Feature Extraction". https://ieeexplore.ieee.org/document/8819434 or here.
My question is: in order to apply the encoded layer output to unsupervised anomaly detection, how was this implemented? What I have so far is that I save the h5 model after creating the layers for the encoder. However, when I load the model, there's a warning saying the model doesn't include the training part.
Any help would be appreciated. Thank you