# Untitled
#
# mail@pastecode.io avatar
# unknown
# plain_text
# 2 years ago
# 4.5 kB
# 11
# Indexable
# Never
#
# (pastecode.io export metadata above, commented out so this file parses as Python)
# needed libraries
import pandas as pd
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label, regionprops, find_contours
import tensorflow as tf
import keras
# from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input, UpSampling2D, Concatenate, Dense
# from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from keras import backend as K


# Per-image RLE ship annotations for the Airbus ship-detection competition.
train_csv = '/kaggle/input/airbus-ship-detection/train_ship_segmentations_v2.csv'
train = pd.read_csv(train_csv)

# an example of what these images and their boundaries look like
# an example of what these images and their boundaries look like
def find_mask(encoded_pixels, size):
    """Decode a run-length-encoded (RLE) annotation into a 2-D binary mask.

    Parameters
    ----------
    encoded_pixels : list[int]
        Flat RLE pairs ``[start, run, start, run, ...]``. Starts are
        1-indexed positions in the column-major flattened image, per the
        Airbus ship-detection annotation format.
    size : int
        Side length of the (square) image, e.g. 768.

    Returns
    -------
    np.ndarray
        ``(size, size)`` uint8 mask with values in {0, 255}.
    """
    flat = np.zeros(size * size, dtype=np.uint8)
    for start, run in zip(encoded_pixels[::2], encoded_pixels[1::2]):
        # BUG FIX: RLE starts are 1-indexed; the original code used them as
        # 0-indexed, shifting every run down by one pixel.
        flat[start - 1:start - 1 + run] = 255
    # RLE runs top-to-bottom first (column-major), hence the transpose.
    return flat.reshape(size, size).T

# find contour on the image
# find contour on the image
def find_contour(encoded_pixels):
    """Rasterise the sub-pixel contours of an RLE annotation onto a
    768x768 image: contour pixels are set to 255, everything else is 0."""
    mask = find_mask(encoded_pixels, 768)
    outline = np.zeros(mask.shape)

    for contour in find_contours(mask):
        # Truncate the float (row, col) contour samples to integer pixels.
        rows = contour[:, 0].astype(int)
        cols = contour[:, 1].astype(int)
        outline[rows, cols] = 255

    return outline

# gives coordinates of bounding box for our object
# gives coordinates of bounding box for our object
def find_coords(mask):
    """Return the (x1, y1, x2, y2) bounding box of the last contour region.

    Parameters
    ----------
    mask : list[int]
        Despite the name (kept for interface compatibility), this is the
        flat RLE pair list consumed by ``find_contour``, not a 2-D array.

    Returns
    -------
    tuple[int, int, int, int]
        ``(x1, y1, x2, y2)`` of the *last* labelled region — with several
        ships only one box is returned, matching the original behaviour.

    Raises
    ------
    ValueError
        If the annotation yields no contour regions (the original code
        raised an opaque UnboundLocalError in this case).
    """
    border = find_contour(mask)
    regions = regionprops(label(border))
    if not regions:
        raise ValueError("no contour regions found in annotation")

    # regionprops bbox order is (min_row, min_col, max_row, max_col);
    # convert to (x1, y1, x2, y2).
    min_row, min_col, max_row, max_col = regions[-1].bbox
    return min_col, min_row, max_col, max_row

# shuffle our dataset and take from there 5k imgs
# BUG FIX: `np.random.shuffle(train.values)` shuffled a *copy* — for a
# mixed-dtype DataFrame, `.values` materialises a fresh object array, so the
# DataFrame itself was never reordered. Shuffle via sample() instead.
np.random.seed(0)
train = train.sample(frac=1, random_state=0).reset_index(drop=True)


n = 300  # number of training images to load into memory

# take arrays of images and coordinates for those imgs
imgs = []
coords = []
for i in range(n):
    path = '/kaggle/input/airbus-ship-detection/train_v2/' + train['ImageId'][i]
    img = cv.imread(path)
    if img is None:
        # cv.imread silently returns None on a missing/corrupt file; the
        # original code then crashed with a cryptic TypeError on `/ 255.0`.
        raise FileNotFoundError(f"could not read image: {path}")
    img = (img / 255.0).astype(np.float32)

    rle = train['EncodedPixels'][i]
    if isinstance(rle, str):
        # Ship present: decode the RLE annotation into a {0, 1} float mask.
        encoded_pixels = [int(tok) for tok in rle.split()]
        coord = (find_mask(encoded_pixels, 768) / 255.0).astype(np.float32)
        coord = np.expand_dims(coord, axis=-1)
    else:
        # No ship (EncodedPixels is NaN): all-zero mask with a consistent
        # float32 dtype (the original used default float64 here).
        coord = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.float32)

    imgs.append(img)
    coords.append(coord)


imgs = np.array(imgs, dtype=np.float32)
coords = np.array(coords, dtype=np.float32)


from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
    
def conv_block(inputs, num_filters):
    """Two 3x3 same-padded convolutions, each followed by batch norm + ReLU."""
    x = inputs
    for _ in range(2):
        x = Conv2D(num_filters, 3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    return x

def encoder_block(inputs, num_filters):
    """Conv block then 2x2 max-pooling; returns (skip tensor, pooled tensor)."""
    skip = conv_block(inputs, num_filters)
    pooled = MaxPool2D((2, 2))(skip)
    return skip, pooled

def decoder_block(inputs, skip_features, num_filters):
    """Upsample 2x with a transposed conv, concatenate the encoder skip
    tensor along channels, then apply a conv block."""
    upsampled = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    merged = Concatenate()([upsampled, skip_features])
    return conv_block(merged, num_filters)

def build_unet(input_shape):
    """Build a standard 4-level U-Net with a 1024-filter bottleneck and a
    single-channel sigmoid output (binary segmentation)."""
    inputs = Input(input_shape)

    # Encoder: collect skip tensors at 64/128/256/512 filters.
    skips = []
    x = inputs
    for filters in (64, 128, 256, 512):
        skip, x = encoder_block(x, filters)
        skips.append(skip)

    # Bottleneck.
    x = conv_block(x, 1024)

    # Decoder: mirror the encoder, consuming skips deepest-first.
    for skip, filters in zip(reversed(skips), (512, 256, 128, 64)):
        x = decoder_block(x, skip, filters)

    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(x)
    return Model(inputs, outputs, name="UNET")

# Build the model unconditionally: `model` is used below by compile()/fit(),
# which raised NameError whenever this file was imported rather than run
# (the original created `model` only under the __main__ guard).
input_shape = (768, 768, 3)
model = build_unet(input_shape)

if __name__ == "__main__":
    model.summary()


def dice_coef(y_true, y_pred, smooth=1e-6):
    """Soft Dice coefficient between flattened masks, in [0, 1].

    Parameters
    ----------
    y_true, y_pred : tensors of matching shape, values in [0, 1].
    smooth : float, optional
        Added to numerator and denominator to avoid 0/0 -> NaN when both
        masks are empty — common here, since many Airbus images contain no
        ship at all. Default keeps the metric numerically unchanged
        elsewhere; the parameter is backward-compatible.
    """
    y_true = tf.keras.layers.Flatten()(y_true)
    y_pred = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true * y_pred)
    return (2. * intersection + smooth) / (
        tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth)

def dice_loss(y_true, y_pred):
    """Dice loss: 1 minus the Dice coefficient (0 = perfect overlap)."""
    overlap = dice_coef(y_true, y_pred)
    return 1.0 - overlap



# Compile with the Dice loss, tracking the Dice coefficient during training.
optimizer = Adam(learning_rate=0.01)
model.compile(optimizer=optimizer, loss=dice_loss, metrics=[dice_coef])

# Train on the in-memory image/mask arrays.
model.fit(imgs, coords, batch_size=4, epochs=10)