How can I get my Python GStreamer app to show the webcam feed properly instead of showing garbled green video?

45 Views

Trying to build a very simple Python program to open the webcam on my MacBook and display that on the screen. However, I cannot get the Python version of my pipeline to display the webcam video, and I get garbled / scrolling green lines.

Would like to understand where I went wrong and how I can fix the program.

So far, I have the following program:

# Standard-library imports.
import os
import sys

# PyGObject bindings: pin GObject-Introspection API versions *before*
# importing from gi.repository, otherwise the newest installed version wins.
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
from gi.repository import GObject, Gst, GstVideo
# NOTE(review): GObject.threads_init() is a no-op/deprecated in modern
# PyGObject (threads are initialized automatically); harmless but removable.
GObject.threads_init()
# Initialize GStreamer before any element is created.
Gst.init(None)

class GstCaps(object):
  """Factory for a configured GStreamer ``capsfilter`` element.

  Calling ``GstCaps(caps_string)`` returns a ``Gst.Element`` (a
  ``capsfilter`` with its ``caps`` property set), NOT a ``GstCaps``
  instance: because ``__new__`` returns an object that is not an
  instance of this class, Python never calls ``__init__`` on the
  result.  The original dead ``__init__`` has therefore been removed —
  it could never run and only misled readers.
  """

  def __new__(cls, caps_string):
    # Build the capsfilter and parse the caps string onto it.
    cf = Gst.ElementFactory.make('capsfilter', None)
    caps = Gst.Caps.from_string(caps_string)
    cf.set_property('caps', caps)
    return cf


class Webcam(object):
  """Open the macOS webcam via avfvideosrc and display it on screen.

  Python port of this working command-line pipeline:
  # ./gst-launch-1.0 -v -e
  #     avfvideosrc device-index=0 !
  #     "video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle" !
  #     rawvideoparse width=1280 height=720 format=yuy2 !
  #     queue !
  #     autovideoconvert !
  #     autovideosink

  BUG (the cause of the garbled green video): ``self.caps`` is created
  below but never added to the pipeline and never linked between
  ``source`` and ``rawvideoparse``, so the source is free to negotiate
  a different format and rawvideoparse misinterprets the raw buffers.
  """

  def __init__(self, device_index: int = 0):
    # GLib main loop; run() blocks here until quit() is called.
    self.mainloop = GObject.MainLoop()

    self.pipeline = Gst.ElementFactory.make('pipeline', 'pipeline')

    # Capture source for macOS AVFoundation devices.
    self.source = Gst.ElementFactory.make('avfvideosrc', 'source')

    self.source.set_property('device-index', device_index)
    # NOTE(review): this capsfilter element is never pipeline.add()ed or
    # linked — see the class docstring.  That is the defect.
    self.caps = GstCaps('video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle')
    # self.source.set_property('caps', caps)

    # Re-parses raw buffers with an explicitly declared geometry/format.
    self.rawvideoparse = Gst.ElementFactory.make('rawvideoparse', 'rawvideoparse')
    self.rawvideoparse.set_property('width', 1280)
    self.rawvideoparse.set_property('height', 720)
    self.rawvideoparse.set_property('format', 'yuy2')

    # Decouples capture from display.
    self.queue = Gst.ElementFactory.make('queue', 'queue')

    self.autovideoconvert = Gst.ElementFactory.make('autovideoconvert', 'autovideoconvert')

    self.autovideosink = Gst.ElementFactory.make('autovideosink', 'autovideosink')

    # ElementFactory.make returns None for unknown factories; bail out early.
    if (not self.pipeline or
        not self.source or
        not self.caps or
        not self.rawvideoparse or
        not self.queue or
        not self.autovideoconvert or
        not self.autovideosink
    ):
      print('ERROR: Not all elements could be created.')
      sys.exit(1)

    # Elements must be added to the bin before they can be linked.
    # (self.caps is conspicuously absent here — the bug.)
    self.pipeline.add(self.source)
    self.pipeline.add(self.rawvideoparse)
    self.pipeline.add(self.queue)
    self.pipeline.add(self.autovideoconvert)
    self.pipeline.add(self.autovideosink)

    # Link source -> rawvideoparse directly, skipping the capsfilter.
    linked = self.source.link(self.rawvideoparse)
    linked = linked and self.rawvideoparse.link(self.queue)
    linked = linked and self.queue.link(self.autovideoconvert)
    linked = linked and self.autovideoconvert.link(self.autovideosink)

    if not linked:
      print("ERROR: Elements could not be linked")
      sys.exit(1)

    # Watch the pipeline bus so EOS/error messages reach our callbacks.
    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message::eos', self.on_eos)
    self.bus.connect('message::error', self.on_error)

  def run(self):
    """Start playback and block in the main loop until quit()."""
    self.pipeline.set_state(Gst.State.PLAYING)
    self.mainloop.run()

  def quit(self):
    """Tear the pipeline down and stop the main loop."""
    self.pipeline.set_state(Gst.State.NULL)
    self.mainloop.quit()

  def on_eos(self, bus, message):
    # End-of-stream: shut down cleanly.
    self.quit()

  def on_error(self, bus, message):
    # Print the GError/debug tuple from the bus and shut down.
    print(f'ERROR: {message.parse_error()}' )
    self.quit()

# Script entry point: build the pipeline and block in the main loop
# until EOS or an error arrives on the bus.
webcam = Webcam()
webcam.run()

With this program, I get the following video:

bad_video

When I run the pipeline from the command line, I get the proper video output as seen below:

gst-launch-1.0 -v -e  avfvideosrc device-index=1 ! \
  "video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle" ! \
  rawvideoparse width=1280 height=720 format=yuy2 ! \
  queue ! \
  autovideoconvert ! \
  autovideosink

good_video

The output from the Python program looks very similar to what a pipeline produces when the format is not specified correctly. I had run into this before while learning how to set up the pipeline on the command line.

1

There is 1 best solution below

0
HanSooloo On BEST ANSWER

It was probably too late when I posted and missed a very obvious step, so posting the answer in case future Google searches come here.

I had forgotten to add the caps between avfvideosrc and rawvideoparse. Updated full code below:

# Standard-library imports.
import os
import sys

# PyGObject bindings: pin GObject-Introspection API versions *before*
# importing from gi.repository, otherwise the newest installed version wins.
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
from gi.repository import GObject, Gst, GstVideo
# NOTE(review): GObject.threads_init() is a no-op/deprecated in modern
# PyGObject (threads are initialized automatically); harmless but removable.
GObject.threads_init()
# Initialize GStreamer before any element is created.
Gst.init(None)

class GstCaps(object):
  """Factory for a configured GStreamer ``capsfilter`` element.

  Calling ``GstCaps(caps_string)`` returns a ``Gst.Element`` (a
  ``capsfilter`` with its ``caps`` property set), NOT a ``GstCaps``
  instance: because ``__new__`` returns an object that is not an
  instance of this class, Python never calls ``__init__`` on the
  result.  The original dead ``__init__`` has therefore been removed —
  it could never run and only misled readers.
  """

  def __new__(cls, caps_string):
    # Build the capsfilter and parse the caps string onto it.
    cf = Gst.ElementFactory.make('capsfilter', None)
    caps = Gst.Caps.from_string(caps_string)
    cf.set_property('caps', caps)
    return cf


class Webcam(object):
  """Open the macOS webcam via avfvideosrc and display it on screen.

  Working Python port of this command-line pipeline (the earlier broken
  version omitted the capsfilter; it is now added and linked below):
  # ./gst-launch-1.0 -v -e
  #     avfvideosrc device-index=0 !
  #     "video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle" !
  #     rawvideoparse width=1280 height=720 format=yuy2 !
  #     queue !
  #     autovideoconvert !
  #     autovideosink
  """

  def __init__(self, device_index: int = 1):
    # GLib main loop; run() blocks here until quit() is called.
    # NOTE(review): GObject.MainLoop is a legacy alias — GLib.MainLoop is
    # the modern spelling; presumably equivalent here, verify against the
    # installed PyGObject version.
    self.mainloop = GObject.MainLoop()

    self.pipeline = Gst.ElementFactory.make('pipeline', 'pipeline')

    # Capture source for macOS AVFoundation devices.
    self.source = Gst.ElementFactory.make('avfvideosrc', 'source')

    self.source.set_property('device-index', device_index)
    # Capsfilter pinning the source output to 1280x720 YUY2 so that
    # rawvideoparse downstream interprets the buffers correctly.
    self.caps = GstCaps('video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle')

    # Re-parses raw buffers with an explicitly declared geometry/format.
    self.rawvideoparse = Gst.ElementFactory.make('rawvideoparse', 'rawvideoparse')
    self.rawvideoparse.set_property('width', 1280)
    self.rawvideoparse.set_property('height', 720)
    # 4 is the numeric GstVideoFormat value used here for YUY2 — the
    # earlier version passed the string 'yuy2' instead.  TODO confirm
    # against the GstVideoFormat enum of the installed GStreamer.
    self.rawvideoparse.set_property('format', 4)

    # Decouples capture from display.
    self.queue = Gst.ElementFactory.make('queue', 'queue')

    self.autovideoconvert = Gst.ElementFactory.make('autovideoconvert', 'autovideoconvert')

    self.autovideosink = Gst.ElementFactory.make('autovideosink', 'autovideosink')

    # ElementFactory.make returns None for unknown factories; bail out early.
    if (not self.pipeline or
        not self.source or
        not self.caps or
        not self.rawvideoparse or
        not self.queue or
        not self.autovideoconvert or
        not self.autovideosink
    ):
      print('ERROR: Not all elements could be created.')
      sys.exit(1)

    # Elements must be added to the bin before they can be linked.
    self.pipeline.add(self.source)
    self.pipeline.add(self.caps)              # THIS LINE WAS MISSING
    self.pipeline.add(self.rawvideoparse)
    self.pipeline.add(self.queue)
    self.pipeline.add(self.autovideoconvert)
    self.pipeline.add(self.autovideosink)

    # Link the chain source -> caps -> rawvideoparse -> queue -> convert -> sink.
    linked = self.source.link(self.caps)                    # THIS LINE WAS MODIFIED TO LINK TO CAPS
    linked = linked and self.caps.link(self.rawvideoparse)  # THIS LINE WAS ADDED TO LINK CAPS TO RAWVIDEOPARSE
    linked = linked and self.rawvideoparse.link(self.queue)
    linked = linked and self.queue.link(self.autovideoconvert)
    linked = linked and self.autovideoconvert.link(self.autovideosink)

    if not linked:
      print("ERROR: Elements could not be linked")
      sys.exit(1)

    # Watch the pipeline bus so EOS/error messages reach our callbacks.
    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message::eos', self.on_eos)
    self.bus.connect('message::error', self.on_error)
    # self.bus.connect('message', self.on_message)

  def run(self):
    """Start playback and block in the main loop until quit()."""
    self.pipeline.set_state(Gst.State.PLAYING)
    self.mainloop.run()

  def quit(self):
    """Tear the pipeline down and stop the main loop."""
    self.pipeline.set_state(Gst.State.NULL)
    self.mainloop.quit()

  def on_eos(self, bus, message):
    # End-of-stream: shut down cleanly.
    self.quit()

  def on_error(self, bus, message):
    # Print the GError/debug tuple from the bus and shut down.
    print(f'ERROR: {message.parse_error()}' )
    self.quit()

  def on_message(self, bus, message):
    # Debug catch-all handler; only active if the commented-out
    # 'message' connect above is re-enabled.
    print(f'MESSAGE: {format(message)}')

# Script entry point: build the pipeline and block in the main loop
# until EOS or an error arrives on the bus.
webcam = Webcam()
webcam.run()