c++ - GStreamer appsink receiving buffers much slower than real time on a CARMA board

Tags: c++ linux gstreamer

I'm fairly new to asking questions on Stack Overflow, but I'll do my best to explain the problem thoroughly.

I'm currently using an Axis IP camera to get live video on a CARMA board. GStreamer pulls the frames with an RTSP client, performs RTP depayloading, and then decodes the H.264 video the camera sends. When I run this process on my computer (currently equipped with an i7 processor), there is no lag and the stream is rendered to the screen in real time, updating at 30 Hz. The problem appears when I switch over to the CARMA board I'm working with: instead of displaying in real time, the appsink receives buffers at a much slower rate than normal. More specifically, with no other processing happening on the CARMA board, the appsink receives buffers at an average of only about 10 Hz instead of 30 Hz. It should also be noted that no frames are dropped; the appsink receives every buffer, just not in real time. Any insight into why this is happening is greatly appreciated. I have already checked that timestamps are not the issue (i.e., the rate at which the appsink receives buffers does not change whether or not I use GStreamer timestamps). The CARMA board currently runs Ubuntu 11.04 and the code is compiled with GCC. Below are some code snippets with their respective explanations.
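For reference, the ~10 Hz figure comes from counting buffers as they arrive in the appsink callback. Below is a minimal sketch of that kind of measurement; the GTimer-based once-per-second report is illustrative rather than taken verbatim from my program, and appsink_buffer_count is the counter incremented in the callback shown further down.

    /* Sketch: report how many buffers the appsink received in the last second.
     * Call this from the "new-buffer" callback; the GTimer is illustrative. */
    static void report_buffer_rate (CustomData *data) {
      static GTimer *timer = NULL;
      static guint last_count = 0;

      if (timer == NULL)
        timer = g_timer_new ();              /* starts counting immediately */

      if (g_timer_elapsed (timer, NULL) >= 1.0) {
        g_print ("appsink rate: %u buffers/s\n",
                 data->appsink_buffer_count - last_count);
        last_count = data->appsink_buffer_count;
        g_timer_reset (timer);
      }
    }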

Some definitions:

#define APPSINK_CAPS "video/x-raw-yuv,format=(fourcc)I420"
#define RTSP_URI "rtsp://(ipaddress)/axis-media/media.amp?videocodec=h264"
#define RTSP_LATENCY 0
#define RTSP_BUFFER_MODE 0
#define RTSP_RTP_BLOCKSIZE 65536

GStreamer pipeline setup code:

      /* Initialize GStreamer */
      gst_init (&argc, &argv);

      /* Create the elements */
      data.rtspsrc = gst_element_factory_make("rtspsrc", NULL);

      data.rtph264depay = gst_element_factory_make("rtph264depay", NULL);

      data.nv_omx_h264dec = gst_element_factory_make("nv_omx_h264dec", NULL);

      data.appsink = gst_element_factory_make("appsink", NULL);

      if (!data.rtspsrc || !data.rtph264depay || !data.nv_omx_h264dec || !data.appsink) {
        g_printerr ("Not all elements could be created.\n");
        return -1;
      }


      /* Set element properties */
      g_object_set( data.rtspsrc, "location", RTSP_URI,
                                  "latency", RTSP_LATENCY,
                                  "buffer-mode", RTSP_BUFFER_MODE,
                                  "rtp-blocksize", RTSP_RTP_BLOCKSIZE,
                                  NULL);
      g_object_set( data.rtph264depay, "byte-stream", FALSE, NULL);
      g_object_set( data.nv_omx_h264dec, "use-timestamps", TRUE, NULL);


      /* Configure appsink. This plugin will allow us to access buffer data */
      GstCaps *appsink_caps;
      appsink_caps = gst_caps_from_string (APPSINK_CAPS);
      g_object_set (data.appsink, "emit-signals", TRUE,
                                  "caps", appsink_caps,
                                  NULL);
      g_signal_connect (data.appsink, "new-buffer", G_CALLBACK (appsink_new_buffer), &data);
      gst_caps_unref (appsink_caps);


      /* Create the empty pipeline */
      data.pipeline = gst_pipeline_new ("test-pipeline");

      if (!data.pipeline) {
        g_printerr ("Pipeline could not be created.\n");
        return -1;
      }


      /* Build the pipeline */
      /* Note that we are NOT linking the source at this point. We will do it later. */
      gst_bin_add_many (GST_BIN(data.pipeline),
                        data.rtspsrc,
                        data.rtph264depay,
                        data.nv_omx_h264dec,
                        data.appsink,
                        NULL);

      if (gst_element_link (data.rtph264depay, data.nv_omx_h264dec) != TRUE) {
        g_printerr ("rtph264depay and nv_omx_h264dec could not be linked.\n");
        gst_object_unref (data.pipeline);
        return -1;
      }
      if (gst_element_link (data.nv_omx_h264dec, data.appsink) != TRUE) {
        g_printerr ("nv_omx_h264dec and appsink could not be linked.\n");
        gst_object_unref (data.pipeline);
        return -1;
      }


      /* Connect to the pad-added signal (CALLBACK!) */
      g_signal_connect (data.rtspsrc, "pad-added", G_CALLBACK (pad_added_handler), &data);

      /* Add a probe to perform hashing on H.264 bytestream */
      GstPad *rtph264depay_src_pad = gst_element_get_static_pad (data.rtph264depay, "src");
      gst_pad_add_buffer_probe (rtph264depay_src_pad, G_CALLBACK (hash_and_report), (gpointer)(&data));
      gst_object_unref (rtph264depay_src_pad);  //unreference the source pad

  /* Start playing */
  ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);

  if (ret == GST_STATE_CHANGE_FAILURE) {
    g_printerr ("Unable to set the pipeline to the playing state.\n");
    gst_object_unref (data.pipeline);
    return -1;
  }


  /* Wait until error or EOS */
  bus = gst_element_get_bus (data.pipeline);
  do {
    msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));

    /* Parse message */
    if (msg != NULL) {
      GError *err;
      gchar *debug_info;

      switch (GST_MESSAGE_TYPE (msg)) {
        case GST_MESSAGE_ERROR:
          gst_message_parse_error (msg, &err, &debug_info);
          g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
          g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
          g_clear_error (&err);
          g_free (debug_info);
          terminate = TRUE;
          break;
        case GST_MESSAGE_EOS:
          g_print ("End-Of-stream reached.\n");
          break;
        case GST_MESSAGE_STATE_CHANGED:
          /* We are only interested in state-changed messages from the pipeline */
          if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
            GstState old_state, new_state, pending_state;
            gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
            g_print ("Pipeline state changed from %s to %s:\n", gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
          }
          break;
        default:
          //we should not reach here because we only asked for ERRORs and EOS and State Changes
          g_printerr ("Unexpected message received.\n");
          break;
      }
      gst_message_unref (msg);
    }
  } while (!terminate);
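As an aside, a quick way to check whether the receive/decode path itself keeps up, independently of the application code, is to run the equivalent pipeline from the command line with a fakesink; with -v and silent=false, fakesink prints a last-message line for every buffer it receives. This is a diagnostic sketch, not part of my program:

    gst-launch-0.10 -v rtspsrc location="rtsp://(ipaddress)/axis-media/media.amp?videocodec=h264" \
        latency=0 ! rtph264depay ! nv_omx_h264dec ! fakesink silent=false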

Now the pad_added_handler:

/* This function will be called by the pad-added signal */
//Thread 1
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
  GstPad *sink_pad = gst_element_get_static_pad (data->rtph264depay, "sink");
  GstPadLinkReturn ret;
  GstCaps *new_pad_caps = NULL;
  GstStructure *new_pad_struct = NULL;
  const gchar *new_pad_type = NULL;

  g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));

  /* Check the new pad's type */
  new_pad_caps = gst_pad_get_caps (new_pad);
  new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
  new_pad_type = gst_structure_get_name (new_pad_struct);
  if (!g_str_has_prefix (new_pad_type, "application/x-rtp")) {
    g_print ("  It has type '%s' which is not RTP. Ignoring.\n", new_pad_type);
    goto exit;
  }

  /* If our converter is already linked, we have nothing to do here */
  if (gst_pad_is_linked (sink_pad)) {
    g_print ("  We are already linked. Ignoring.\n");
    goto exit;
  }

  /* Attempt the link */
  ret = gst_pad_link (new_pad, sink_pad);
  if (GST_PAD_LINK_FAILED (ret)) {
    g_print ("  Type is '%s' but link failed.\n", new_pad_type);
  } else {
    g_print ("  Link succeeded (type '%s').\n", new_pad_type);
  }

exit:
  /* Unreference the new pad's caps, if we got them */
  if (new_pad_caps != NULL)
    gst_caps_unref (new_pad_caps);

  /* Unreference the sink pad */
  gst_object_unref (sink_pad);
}
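For completeness, hash_and_report (registered earlier with gst_pad_add_buffer_probe but not shown in this post) follows the GStreamer 0.10 buffer-probe callback shape. A minimal sketch, with the actual hashing elided:

    /* Buffer probe on the rtph264depay src pad (GStreamer 0.10 signature). */
    static gboolean hash_and_report (GstPad *pad, GstBuffer *buffer, gpointer user_data) {
      /* user_data is the &data passed to gst_pad_add_buffer_probe above */

      /* ... hash the GST_BUFFER_SIZE (buffer) bytes at GST_BUFFER_DATA (buffer) ... */

      return TRUE;  /* TRUE = let the buffer continue downstream */
    }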

Next is the function that is called every time the appsink receives a buffer. This is where I believe (though I'm not certain) buffers are failing to arrive in real time, which makes me think some processing I'm doing takes too long before the next buffer can be handled:

// Called when appsink receives a buffer: Thread 1
void appsink_new_buffer (GstElement *sink, CustomData *data) {
  GstBuffer *buffer;

  /* Retrieve the buffer */
  g_signal_emit_by_name (sink, "pull-buffer", &buffer);
  if (buffer) {

    data->appsink_buffer_count++;

    //push buffer onto queue, to be processed in different thread
    if (GstBufferQueue->size() > GSTBUFFERQUEUE_SIZE) {
      //error message
      printf ("GstBufferQueue is full!\n");
      //release buffer
      gst_buffer_unref (buffer);
    } else {
      //push onto queue
      GstBufferQueue->push(buffer);
      //activate thread
      connectionDataAvailable_GstBufferQueue.notify_all();
    }
  }
}
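For context, the consuming side of GstBufferQueue runs in a separate thread (Thread 2). Below is a minimal sketch of what that consumer typically looks like with C++11 primitives (Boost equivalents behave the same on older toolchains); queue_mutex and process_buffer() are hypothetical stand-ins, while GstBufferQueue and connectionDataAvailable_GstBufferQueue are the names used above:

    #include <queue>
    #include <mutex>
    #include <condition_variable>

    // Shared with appsink_new_buffer(); the names match the callback above,
    // the mutex is a hypothetical addition needed to make the wait safe.
    extern std::queue<GstBuffer *> *GstBufferQueue;
    extern std::condition_variable connectionDataAvailable_GstBufferQueue;
    std::mutex queue_mutex;

    void process_buffer (GstBuffer *buffer);  // hypothetical per-frame work

    // Thread 2: drain the queue, process each frame, then release the buffer
    void gstbuffer_consumer () {
      for (;;) {
        std::unique_lock<std::mutex> lock (queue_mutex);

        // sleep until appsink_new_buffer() signals that a buffer was pushed
        while (GstBufferQueue->empty ())
          connectionDataAvailable_GstBufferQueue.wait (lock);

        GstBuffer *buffer = GstBufferQueue->front ();
        GstBufferQueue->pop ();
        lock.unlock ();               // do the heavy work outside the lock

        process_buffer (buffer);      // hypothetical per-frame processing
        gst_buffer_unref (buffer);    // release the buffer once done
      }
    }

Note that for the condition-variable wait to be race-free, the push and notify_all in appsink_new_buffer would also need to happen while holding the same mutex.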

A link to the camera I'm using:

http://www.axis.com/products/cam_p1357/index.htm

Hopefully this helps. I'll keep investigating the problem myself and will post updates as they come. Let me know if you need any other information, and I look forward to reading your responses!

Thanks

Best Answer

As it turns out, the problem was not with the program (i.e., the software design) but with the hardware: the components on the CARMA board cannot keep up with the amount of processing I am doing. In other words, the Tegra 3 processor on the CARMA is insufficient as a device for this workload. Possible solutions are to reduce the amount of processing I do on the CARMA board or to upgrade to a different board. I hope this helps people understand the limited processing power available on these smaller devices, and also serves as a reminder that processors (particularly in the Tegra 3 class, which implements a system-on-chip model) may not currently have the compute capability to keep up with projects or systems that require heavy real-time computation.
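For anyone hitting the same wall, a quick way to confirm this kind of conclusion is to time the per-buffer work against the frame budget, which is about 33 ms at 30 Hz. A minimal sketch, with process_buffer() again standing in for the real work:

    /* Time one buffer's worth of processing against the 30 Hz budget (~33 ms) */
    GTimer *timer = g_timer_new ();
    process_buffer (buffer);                  /* stand-in for the real work */
    gdouble elapsed_ms = g_timer_elapsed (timer, NULL) * 1000.0;
    g_timer_destroy (timer);

    if (elapsed_ms > 33.3)
      g_print ("Over budget: %.1f ms per buffer (33.3 ms available at 30 Hz)\n", elapsed_ms);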

In short, be careful what you buy! Do your best to make sure what you purchase is right for the project! That said, don't be afraid to try new devices. Even though I couldn't do what I wanted, I learned more than I expected to. After all, computer science is all about continuous learning :p

Original question on Stack Overflow: https://stackoverflow.com/questions/18390079/
