How do I create a panorama from a linear video?


I have a linear video in which a camera records an entire lane of strawberry plants in a greenhouse. I want to convert the whole video of the lane into one panorama image, or into several smaller panoramas. I have tried splitting the video into frames and stitching them with open-source approaches based on feature matching and homography estimation. However, the results are terrible.
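To make this concrete, the core of the approach on just two consecutive frames is roughly the following (a minimal sketch; the file names are placeholders, and the helper that does the actual warping is sketched further down):

import cv2
import numpy as np

# Sketch: feature matching and homography between two consecutive frames
# (file names are placeholders, not my actual paths)
img1 = cv2.imread("frame_0001.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("frame_0002.jpg", cv2.IMREAD_GRAYSCALE)

# Detect ORB features in both frames and match them
orb = cv2.ORB_create(nfeatures=2000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

# Keep only matches that pass Lowe's ratio test
good = [m for m, n in matches if m.distance < 0.75 * n.distance]

# Homography that maps points of img1 into img2's coordinates
src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
print(f"{len(good)} ratio-test matches, {int(mask.sum())} RANSAC inliers")
# The panorama itself is then composed by warping one frame onto the other with H
# (my warpImages helper does this; see the sketch further down).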

This is my code:

import cv2
import numpy as np
import glob
import imutils
from modules import *

# Read the frames from input path
frame_input_path = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Test images for final/Frames/*.jpg"

# Define whatever variables necessary

input_img = glob.glob(frame_input_path)
img_path = sorted(input_img)
# Resize the input images if they are too big and the panorama starts to lag
"""for i in range(0, len(img_path)):
    img = cv2.imread(img_path[i])
    img = cv2.resize(img, (750, 1000))
    cv2.imwrite(img_path[i], img)
"""
tmp = img_path[0]
flag = True
pano = []
i = 1
count = 0
indices = []
k = 1

# First set of panoramas

while i < len(img_path):
    indices.append(i)
    print(i)
    count += 1
    if flag:
        img1 = cv2.imread(tmp, cv2.IMREAD_GRAYSCALE)  # use an imread flag here (cv2.COLOR_* codes are for cvtColor)
        img2 = cv2.imread(img_path[i], cv2.IMREAD_GRAYSCALE)
        flag = False
    # img1 = cv2.resize(img1, (0, 0), fx=1, fy=1)
    img2 = cv2.imread(img_path[i], cv2.IMREAD_GRAYSCALE)
    # img2 = cv2.resize(img2, (0, 0), fx=1, fy=1)

    # Number of ORB features to detect per image; set to 2000 here, change it if needed and see what happens
    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img2, None)

    # Create a BFMatcher object.
    # It will find all of the matching keypoints on two images
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    # Find matching points
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    # Finding the best matches
    good = []
    for m, n in matches:
        # vary this distance and see what happens
        ##########     PARAMETER       #######
        if m.distance < 0.9 * n.distance:
            #####################################
            good.append(m)

    ##########     PARAMETER       #######
    MIN_MATCH_COUNT = 20
    #####################################

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result = warpImages(img2, img1, M)
        img1 = result  # keep growing the panorama so the next frame is stitched onto it

    i += 1
    ###### PARAMETER ####### This decides how many frames to stitch at a time
    if count % 10 == 0:
        ########################
        i += 5  ### This parameter decides how many frames to skip
        count = 0
        # result = trim(img1)         #These are three cropping mechanisms
        #result = crop(img1)
        # result = cropping(img1)
        # result = result[:, 25:]

        # Output path for where the smaller panoramas are to be written
        #cv2.imwrite(f"Test images for final/Smaller panoramas/frame{k}.jpg", result)
        result = cv2.resize(result, (750, 1000))
        cv2.imshow("result",result)
        cv2.waitKey(0)

        k += 1   # index of the smaller panorama
        indices = []
        try:
            img1 = cv2.imread(img_path[i], cv2.IMREAD_GRAYSCALE)  # start the next batch from this frame
            # i = i + 1
            ###### RESIZE THE NEXT INPUT IMAGE IF NEEDED ####
            """img1 = cv2.resize(img1, (1080, 1920))
            cv2.imshow("Stitch", result)
            cv2.waitKey(0)"""
        except IndexError:
            # we ran past the last frame; the while condition ends the loop
            continue

# This ends the smaller panoramas, produced in batches as specified above

# If no frames are left over, i.e. the batch size and skip land exactly on the last frame
if len(indices) == 0:
    indices = [0]
    j = 100
# Not sure why I added this
if len(indices) == 7:
    print('Hi')
    indices = [0]
    j = 100  # j = 100 means there is nothing left to stitch, so it skips straight to stacking

# If indices is not empty, i.e. a few frames are left over and still need to be stitched

if indices[0] != 0:
    print('Going to stitch last panorama')
    i = 0
    print(indices)
    j = indices[i]
    temp = img_path[j]


#If only one image is left
if j == (len(img_path) - 1):
    img_1 = cv2.imread(temp)  # This is the only image left, so the last panorama is just this frame

#Stitch the last panorama
i = 1
flag1 = True
while i < len(indices):
    if flag1:
        img_1 = cv2.imread(temp, cv2.IMREAD_GRAYSCALE)
        j = indices[i]
        img_2 = cv2.imread(img_path[j], cv2.IMREAD_GRAYSCALE)
        flag1 = False
    img_1 = cv2.resize(img_1, (0, 0), fx=1, fy=1)
    j = indices[i]
    img_2 = cv2.imread(img_path[j], cv2.IMREAD_GRAYSCALE)
    img_2 = cv2.resize(img_2, (0, 0), fx=1, fy=1)

    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img_1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img_2, None)

    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    # Finding the best matches
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:    #PARAMETER
            good.append(m)

    MIN_MATCH_COUNT = 20   #PARAMETER

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result1 = warpImages(img_2, img_1, M)
        img_1 = result1

    i += 1

#Small panorama is stitched for the last one

if j != 100:
    img_1 = cv2.resize(img_1, (1080, 1920))
    #cv2.imwrite(f"Test images for final/Smaller panoramas/frame{k}.jpg", img_1)
    #This panorama is either just the last image or the last small panorama, depending on how many frames were left
    cv2.imshow("Last pano", img_1)
    cv2.waitKey(0)
#All panoramas are written and are ready to be stacked

I first take the frames and resize them if necessary, then detect features and keep the good matches. If there are enough of them, I estimate the homography matrix and warp one image onto the other. I process the frames in batches of 10 at a time and try to stitch each batch into a smaller panorama, but I am not getting usable results.
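For reference, warpImages is imported from my modules file; it is along the lines of the usual corner-transform-and-paste helper sketched below (the exact implementation in modules may differ slightly):

import cv2
import numpy as np

def warpImages(img1, img2, H):
    # Warp img2 into img1's coordinate frame with H, on a canvas large enough
    # to hold both images, then paste img1 on top of the warped result.
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    corners1 = np.float32([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
    corners2 = np.float32([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)
    corners2 = cv2.perspectiveTransform(corners2, H)

    # Bounding box of both images after the warp
    all_corners = np.concatenate((corners1, corners2), axis=0)
    x_min, y_min = np.int32(all_corners.min(axis=0).ravel() - 0.5)
    x_max, y_max = np.int32(all_corners.max(axis=0).ravel() + 0.5)

    # Shift everything so that all coordinates are positive
    translation = [-x_min, -y_min]
    H_translation = np.array([[1, 0, translation[0]],
                              [0, 1, translation[1]],
                              [0, 0, 1]], dtype=np.float64)

    output = cv2.warpPerspective(img2, H_translation.dot(H), (x_max - x_min, y_max - y_min))
    output[translation[1]:rows1 + translation[1], translation[0]:cols1 + translation[0]] = img1
    return output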

I have attached the drive link of the video I want to stitch. Any suggestions or approaches are welcome.

Video file

The first output image, from processing the first 10 frames, looked like this: Output

I split the video into frames with ffmpeg at a frame rate of 4 fps.
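For completeness, the same extraction can also be done in Python with cv2.VideoCapture instead of ffmpeg; a rough sketch (the video path and output folder are placeholders):

import cv2

# Grab roughly 4 frames per second from the lane video
cap = cv2.VideoCapture("lane.mp4")
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0   # fall back to 30 if the FPS is unknown
step = max(int(round(fps / 4.0)), 1)      # keep every N-th frame to get ~4 fps

idx = saved = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if idx % step == 0:
        cv2.imwrite(f"Frames/frame_{saved:04d}.jpg", frame)
        saved += 1
    idx += 1
cap.release()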

