import cv2
import numpy as np
import matplotlib.pyplot as plt

imgCenter = cv2.imread('SourceImages/sac_center.png', cv2.IMREAD_COLOR)
imgRight  = cv2.imread('SourceImages/sac_r.png', cv2.IMREAD_COLOR)
imgLeft   = cv2.imread('SourceImages/sac_l.png', cv2.IMREAD_COLOR)

# initialize the stitched canvas as the center image, padded so the warped side images fit
imgCenter = cv2.copyMakeBorder(imgCenter, 1000, 1000, 1000, 1000, cv2.BORDER_CONSTANT)
print(imgLeft.shape)
print(imgCenter.shape)
print(imgRight.shape)

# blend two images
def alpha_blend(img_A, img_B):
    # Implement alpha_blending, using 0.5 and 0.5 for alphas
    ##########--WRITE YOUR CODE HERE--##########
    alpha = 0.5
    beta = 0.5
    gamma = 0.0  # This is a scalar added to each sum

    blended = cv2.addWeighted(img_A, alpha, img_B, beta, gamma)

    ##########-------END OF CODE-------##########
    return blended

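# A minimal sanity check for alpha_blend on synthetic data (a sketch, not part of the
# assignment outputs): blending an all-black patch with an all-white patch at 0.5/0.5
# weights should give a uniform mid-gray patch.
_patch_dark = np.zeros((4, 4, 3), dtype=np.uint8)
_patch_light = np.full((4, 4, 3), 255, dtype=np.uint8)
print(alpha_blend(_patch_dark, _patch_light)[0, 0])  # expect roughly [127 127 127] or [128 128 128]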

def Laplacian_Blending(img_A, img_B, mask, num_levels=5):
    # Implement Laplacian_blending
    # assume mask is float32 in [0,1], with the same size as img_A and img_B
    # the mask indicates which parts of img_A or img_B are blended together
    # num_levels is the number of levels in the pyramid
    assert img_A.shape==img_B.shape
    assert img_A.shape==mask.shape
    ##########--WRITE YOUR CODE HERE--##########
    mask = np.clip(mask, 0, 1)
    gaussian_a = np.float32(img_A)
    gaussian_b = np.float32(img_B)
    gaussian_mask = np.float32(mask)
    gaussian_pyramid_A = [gaussian_a]
    gaussian_pyramid_B = [gaussian_b]
    gaussian_pyramid_mask = [gaussian_mask]

    # build Gaussian pyramids for both images and the mask
    for i in range(num_levels):
        gaussian_a = cv2.pyrDown(gaussian_a)
        gaussian_b = cv2.pyrDown(gaussian_b)
        gaussian_mask = cv2.pyrDown(gaussian_mask)

        gaussian_pyramid_A.append(gaussian_a)
        gaussian_pyramid_B.append(gaussian_b)
        gaussian_pyramid_mask.append(gaussian_mask)

    # Generate Laplacian pyramids for each image
    # (coarsest Gaussian level first, followed by the detail bands)
    laplacian_pyramid_A = [gaussian_pyramid_A[num_levels]]
    laplacian_pyramid_B = [gaussian_pyramid_B[num_levels]]
    laplacian_pyramid_mask = [gaussian_pyramid_mask[num_levels]]

    for i in range(num_levels, 0, -1):
        size = (gaussian_pyramid_A[i - 1].shape[1], gaussian_pyramid_A[i - 1].shape[0])

        laplacian_A = gaussian_pyramid_A[i - 1] - cv2.pyrUp(gaussian_pyramid_A[i], dstsize=size)
        laplacian_B = gaussian_pyramid_B[i - 1] - cv2.pyrUp(gaussian_pyramid_B[i], dstsize=size)

        laplacian_pyramid_A.append(laplacian_A)
        laplacian_pyramid_B.append(laplacian_B)
        laplacian_pyramid_mask.append(gaussian_pyramid_mask[i - 1])

    # Blend the Laplacian pyramids using the Gaussian pyramid of the mask
    # (no per-level clipping: Laplacian detail bands are legitimately negative)
    blended_pyramid = []
    for laplacian_A, laplacian_B, level_mask in zip(laplacian_pyramid_A, laplacian_pyramid_B, laplacian_pyramid_mask):
        blended_pyramid.append(laplacian_A * level_mask + laplacian_B * (1 - level_mask))

    # Reconstruct the blended image from the coarsest level upward
    blended = blended_pyramid[0]
    for i in range(1, num_levels + 1):
        size = (blended_pyramid[i].shape[1], blended_pyramid[i].shape[0])
        blended = cv2.pyrUp(blended, dstsize=size) + blended_pyramid[i]

    blended = np.clip(blended, 0, 255)

    ##########-------END OF CODE-------##########
    return blended

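# Minimal sketch of how Laplacian_Blending is meant to be called (assuming the mask is
# float32 in [0,1] with the same shape as both images): blend a dark and a light square
# across a vertical seam. The left half of the mask is 1 (keep img_A), the right half is
# 0 (keep img_B); the pyramid blending softens the transition at the seam.
_A = np.full((64, 64, 3), 50, dtype=np.uint8)
_B = np.full((64, 64, 3), 200, dtype=np.uint8)
_mask = np.zeros((64, 64, 3), dtype=np.float32)
_mask[:, :32, :] = 1.0
_demo = Laplacian_Blending(_A, _B, _mask, num_levels=3)
print(_demo.shape, _demo.min(), _demo.max())
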
def getTransform(img1, img2):
    ##########--WRITE YOUR CODE HERE--##########
    # compute sift descriptors
    sift = cv2.SIFT_create()
    keypoints_1, descriptors_1 = sift.detectAndCompute(img1, None)
    keypoints_2, descriptors_2 = sift.detectAndCompute(img2, None)

    # find all matches
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(descriptors_1, descriptors_2, k=2)

    # apply ratio test, use ratio = 0.75
    good_matches = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good_matches.append(m)

    # draw the good matches (using OpenCV's built-in cv2.drawMatches here; the
    # original notebook may supply its own drawMatches helper instead)
    img_match = cv2.drawMatches(img1, keypoints_1, img2, keypoints_2, good_matches, None)


    # find the perspective transform matrix using RANSAC (needs at least 4 good matches)
    if len(good_matches) >= 4:
        src_pts = np.float32([keypoints_1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints_2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        H, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    else:
        raise ValueError("Not enough good matches found: {} (need at least 4)".format(len(good_matches)))

    ##########-------END OF CODE-------##########
    # H is the perspective transform matrix
    # img_match is the image returned by drawMatches
    return H, img_match

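# Hedged sketch (a hypothetical helper, not required by the assignment): given the 3x3
# homography H returned by getTransform, cv2.perspectiveTransform maps the corners of the
# source image into the target frame, which is useful for checking that the warped image
# will land inside the padded canvas before blending.
def warped_corners(img, H):
    h, w = img.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(corners, H).reshape(-1, 2)
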

def perspective_warping_alpha_blending(imgCenter, imgLeft, imgRight):
    ##########--WRITE YOUR CODE HERE--##########
    # Get homography from right to center
    # img_match_cr is your first output
    # call getTransform to get the transformation from the right to the center image
    H_cr, img_match_cr = getTransform(imgRight, imgCenter)

    # Blend center and right
    # stitched_cr is your second output, returned by alpha_blending
    # call alpha_blending
    warped_right = cv2.warpPerspective(imgRight, H_cr, (imgCenter.shape[1], imgCenter.shape[0]))
    stitched_cr = alpha_blend(imgCenter, warped_right)

    # Get homography from left to stitched center_right
    # img_match_lcr is your third output
    # call getTransform to get the transformation from the left to stitched_cr
    H_lcr, img_match_lcr = getTransform(imgLeft, stitched_cr)

    # Blend left and center_right
    # stitched_lcr is your fourth output, returned by alpha_blending
    # call alpha_blending
    warped_left = cv2.warpPerspective(imgLeft, H_lcr, (stitched_cr.shape[1], stitched_cr.shape[0]))
    
    # Blend left and center_right
    stitched_lcr = alpha_blend(stitched_cr, warped_left)

    ##########-------END OF CODE-------##########
    return img_match_cr, stitched_cr, img_match_lcr, stitched_lcr

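# Optional post-processing sketch (a hypothetical helper, not one of the required outputs):
# the 1000-pixel padding leaves large black borders around the stitched result, so cropping
# to the bounding box of the non-zero pixels tightens the final panorama.
def crop_black_border(img):
    nonzero = np.argwhere(np.any(img != 0, axis=2))
    if nonzero.size == 0:
        return img
    (y0, x0), (y1, x1) = nonzero.min(axis=0), nonzero.max(axis=0) + 1
    return img[y0:y1, x0:x1]
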
def perspective_warping_laplacian_blending(imgCenter, imgLeft, imgRight):
    ##########--WRITE YOUR CODE HERE--##########
    # Get homography from right to center
    # call getTransform to get the transformation from the right to the center image
    H_cr, img_match_cr = getTransform(imgRight, imgCenter)

    # Blend center and right
    # stitched_cr is your first bonus output, returned by Laplacian_blending
    # call Laplacian_blending
    warped_right = cv2.warpPerspective(imgRight, H_cr, (imgCenter.shape[1], imgCenter.shape[0]))
    mask_cr = np.ones_like(imgCenter, dtype=np.float32)  # 1 = keep the (padded) center image
    mask_cr[np.any(warped_right != 0, axis=2)] = 0       # 0 = use the warped right image there
    stitched_cr = Laplacian_Blending(imgCenter, warped_right, mask_cr, 2)

    # Get homography from left to stitched center_right
    # call getTransform to get the transformation from the left to stitched_cr
    H_lcr, img_match_lcr = getTransform(imgLeft, stitched_cr.astype(np.uint8))
    warped_left = cv2.warpPerspective(imgLeft, H_lcr, (stitched_cr.shape[1], stitched_cr.shape[0]))

    # Blend left and center_right
    # stitched_lcr is your second bonus output, returned by Laplacian_blending
    # call Laplacian_blending
    mask_lcr = np.ones_like(stitched_cr, dtype=np.float32)  # 1 = keep stitched center_right
    mask_lcr[np.any(warped_left != 0, axis=2)] = 0          # 0 = use the warped left image there
    stitched_lcr = Laplacian_Blending(stitched_cr, warped_left, mask_lcr, 2)

    img_match_cr=img_match_cr.astype(np.uint8)
    stitched_cr=stitched_cr.astype(np.uint8)
    img_match_lcr=img_match_lcr.astype(np.uint8)
    stitched_lcr=stitched_lcr.astype(np.uint8)


    ##########-------END OF CODE-------##########
    return img_match_cr, stitched_cr, img_match_lcr, stitched_lcr

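# Optional variation (a sketch, under the assumption that softer seams are desired):
# the binary occupancy masks built above can be feathered with a Gaussian blur before
# Laplacian blending, widening the transition band and further hiding the seam.
def feather_mask(mask, ksize=51):
    # ksize must be odd for cv2.GaussianBlur; larger values give a wider transition band
    blurred = cv2.GaussianBlur(mask, (ksize, ksize), 0)
    return np.clip(blurred, 0.0, 1.0).astype(np.float32)
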

img_match_cr, stitched_cr, img_match_lcr, stitched_lcr = perspective_warping_alpha_blending(imgCenter, imgLeft, imgRight)
img_match_cr_lap, stitched_cr_lap, img_match_lcr_lap, stitched_lcr_lap = perspective_warping_laplacian_blending(imgCenter, imgLeft, imgRight)

plt.figure(figsize=(15,30));
plt.subplot(4, 1, 1);
plt.imshow(cv2.cvtColor(img_match_cr, cv2.COLOR_BGR2RGB));
plt.title("center and right matches");
plt.axis('off');
plt.subplot(4, 1, 2);
plt.imshow(cv2.cvtColor(stitched_cr, cv2.COLOR_BGR2RGB));
plt.title("center, right: stitched result");
plt.axis('off');
plt.subplot(4, 1, 3);
plt.imshow(cv2.cvtColor(img_match_lcr, cv2.COLOR_BGR2RGB));
plt.title("left and center_right matches");
plt.axis('off');
plt.subplot(4, 1, 4);
plt.imshow(cv2.cvtColor(stitched_lcr, cv2.COLOR_BGR2RGB));
plt.title("left, center, right: stitched result");
plt.axis('off');
plt.show();

plt.figure(figsize=(15,30));
plt.subplot(4, 1, 1);
plt.imshow(cv2.cvtColor(stitched_cr_lap, cv2.COLOR_BGR2RGB));
plt.title("Laplacian - center, right: stitched result");
plt.axis('off');
plt.subplot(4, 1, 2);
plt.imshow(cv2.cvtColor(stitched_lcr_lap, cv2.COLOR_BGR2RGB));
plt.title("Laplacian - left, center, right: stitched result");
plt.axis('off');
plt.show();