from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
import ssl

# Allow the pretrained weights to be downloaded without SSL certificate verification
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context

# Load the pre-trained InceptionV3 model without the top (fully connected) layers
base_model = InceptionV3(weights='imagenet', include_top=False)

# Add custom top layers for fine-tuning
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
# and a softmax output layer for our 2 classes
predictions = Dense(2, activation='softmax')(x)  ## Changes made here

# Combine the base model and custom top layers
model = Model(inputs=base_model.input, outputs=predictions)

# Freeze the base model so only the new top layers are trained
for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'], run_eagerly=True)

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
test_datagen = ImageDataGenerator(rescale=1./255)

batch_size = 20

# Define paths to your train and validation datasets
train_dataset_path = '/Analytics/venv/Jup/CAPE_Case_Management_PDF_Invoicing/Data/images/Train_dataset_images/'
validation_dataset_path = '/Analytics/venv/Jup/CAPE_Case_Management_PDF_Invoicing/Data/images/Validation_dataset_images/'

import pandas as pd
import os

# Create DataFrames with file paths and labels
train_invoice_df = pd.DataFrame({'filepath': [os.path.join(train_dataset_path, 'invoice', f) for f in os.listdir(os.path.join(train_dataset_path, 'invoice'))],
                                 'label': 'invoice'})
train_non_invoice_df = pd.DataFrame({'filepath': [os.path.join(train_dataset_path, 'non_invoice', f) for f in os.listdir(os.path.join(train_dataset_path, 'non_invoice'))],
                                     'label': 'non_invoice'})
validation_invoice_df = pd.DataFrame({'filepath': [os.path.join(validation_dataset_path, 'invoice', f) for f in os.listdir(os.path.join(validation_dataset_path, 'invoice'))],
                                      'label': 'invoice'})
validation_non_invoice_df = pd.DataFrame({'filepath': [os.path.join(validation_dataset_path, 'non_invoice', f) for f in os.listdir(os.path.join(validation_dataset_path, 'non_invoice'))],
                                          'label': 'non_invoice'})

# Concatenate the dataframes
train_df = pd.concat([train_invoice_df, train_non_invoice_df], ignore_index=True)
validation_df = pd.concat([validation_invoice_df, validation_non_invoice_df], ignore_index=True)

# Shuffle the dataframes
train_df = train_df.sample(frac=1).reset_index(drop=True)
validation_df = validation_df.sample(frac=1).reset_index(drop=True)

# Create data generators
train_generator = train_datagen.flow_from_dataframe(
    train_df,
    x_col='filepath',
    y_col='label',
    target_size=(299, 299),
    batch_size=batch_size,
    class_mode='binary'
)
validation_generator = test_datagen.flow_from_dataframe(
    validation_df,
    x_col='filepath',
    y_col='label',
    target_size=(299, 299),
    batch_size=batch_size,
    class_mode='binary'
)

epochs = 8

print("Number of training samples:", len(train_generator))
print("Number of validation samples:", len(validation_generator))
print("Class indices for training:", train_generator.class_indices)
print("Class indices for validation:", validation_generator.class_indices)

model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size
)

model.save('Fine_tuned_invoice_model_v2_dec5.h5')

We changed this line, since we have 2 classes:

predictions = Dense(2, activation='softmax')(x)  ## Changes made here

but now we are getting this error:

Epoch 1/8
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Input In [1], in <cell line: 107>()
    102 print("Class indices for training:", train_generator.class_indices)
    103 print("Class indices for validation:", validation_generator.class_indices)
--> 107 model.fit(
    108     train_generator,
    109     steps_per_epoch=train_generator.samples // batch_size,
    110     epochs=epochs,
    111     validation_data=validation_generator,
    112     validation_steps=validation_generator.samples // batch_size
    113 )
    115 model.save('Fine_tuned_invoice_model_v2_dec5.h5')

File /Analytics/venv/CAPEANALYTICS/lib/python3.8/site-packages/keras/src/utils/traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     67     filtered_tb = _process_traceback_frames(e.__traceback__)
     68     # To get the full stack trace, call:
     69     # `tf.debugging.disable_traceback_filtering()`
---> 70     raise e.with_traceback(filtered_tb) from None
     71 finally:
     72     del filtered_tb

File /Analytics/venv/CAPEANALYTICS/lib/python3.8/site-packages/keras/src/backend.py:5809, in binary_crossentropy(target, output, from_logits)
   5805 output, from_logits = _get_logits(
   5806     output, from_logits, "Sigmoid", "binary_crossentropy"
   5807 )
   5808 if from_logits:
-> 5809     return tf.nn.sigmoid_cross_entropy_with_logits(
   5810         labels=target, logits=output
   5811     )
   5813 epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
   5814 output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)

ValueError: `logits` and `labels` must have the same shape, received ((20, 2) vs (20, 1)).
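For context, the shape mismatch comes from an inconsistent output/loss/label combination: the 2-unit softmax head predicts shape (batch, 2), while class_mode='binary' together with binary_crossentropy feeds labels of shape (batch, 1). Below is a minimal sketch of two internally consistent setups, assuming the rest of the script above stays unchanged and only the shown lines are replaced:

# Option A: keep the 2-unit softmax head, switch to one-hot labels and categorical loss
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'], run_eagerly=True)
# ...and in both flow_from_dataframe calls use:
#     class_mode='categorical'   # labels become one-hot vectors of shape (batch, 2)

# Option B: keep binary_crossentropy and class_mode='binary', use a single sigmoid unit
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'], run_eagerly=True)
# ...and keep class_mode='binary' in both generators (0/1 labels matching the single output)

Either pairing makes the labels produced by the generators match the shape of the model's predictions, which is exactly the mismatch the ValueError reports as (20, 2) vs (20, 1).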