Asterisk - The Open Source Telephony Project  18.5.0
console_video.c
Go to the documentation of this file.
1 /*
2  * Asterisk -- An open source telephony toolkit.
3  *
4  * Copyright 2007-2008, Marta Carbone, Sergio Fadda, Luigi Rizzo
5  *
6  * See http://www.asterisk.org for more information about
7  * the Asterisk project. Please do not directly contact
8  * any of the maintainers of this project for assistance;
9  * the project provides a web site, mailing lists and IRC
10  * channels for your use.
11  *
12  * This program is free software, distributed under the terms of
13  * the GNU General Public License Version 2. See the LICENSE file
14  * at the top of the source tree.
15  */
16 
17 /*
18  * Experimental support for video sessions. We use SDL for rendering, ffmpeg
19  * as the codec library for encoding and decoding, and Video4Linux and X11
20  * to generate the local video stream.
21  *
22  * If one of these pieces is not available, either at compile time or at
23  * runtime, we do our best to run without it. Of course, no codec library
24  * means we can only deal with raw data, no SDL means we cannot do rendering,
25  * no V4L or X11 means we cannot generate data (but in principle we could
26  * stream from or record to a file).
27  *
28  * We need a recent (2007.07.12 or newer) version of ffmpeg to avoid warnings.
29  * Older versions might give 'deprecated' messages during compilation,
30  * thus not compiling in AST_DEVMODE, or don't have swscale, in which case
31  * you can try to compile #defining OLD_FFMPEG here.
32  *
33  * $Revision$
34  */
35 
36 //#define DROP_PACKETS 5 /* if set, drop this % of video packets */
37 //#define OLD_FFMPEG 1 /* set for old ffmpeg with no swscale */
38 
39 /*** MODULEINFO
40  <support_level>extended</support_level>
41  ***/
42 
43 #include "asterisk.h"
44 #include <sys/ioctl.h>
45 #include "asterisk/cli.h"
46 #include "asterisk/file.h"
47 #include "asterisk/channel.h"
48 
49 #include "console_video.h"
50 
51 /*
52 The code is structured as follows.
53 
54 When a new console channel is created, we call console_video_start()
55 to initialize SDL, the source, and the encoder/ decoder for the
56 formats in use (XXX the latter two should be done later, once the
57 codec negotiation is complete). Also, a thread is created to handle
58 the video source and generate frames.
59 
60 While communication is on, the local source is generated by the
61 video thread, which wakes up periodically, generates frames and
62 enqueues them in chan->readq. Incoming rtp frames are passed to
63 console_write_video(), decoded and passed to SDL for display.
64 
65 For as unfortunate and confusing as it can be, we need to deal with a
66 number of different video representations (size, codec/pixel format,
67 codec parameters), as follows:
68 
69  loc_src is the data coming from the camera/X11/etc.
70  The format is typically constrained by the video source.
71 
72  enc_in is the input required by the encoder.
73  Typically constrained in size by the encoder type.
74 
75  enc_out is the bitstream transmitted over RTP.
76  Typically negotiated while the call is established.
77 
78  loc_dpy is the format used to display the local video source.
79  Depending on user preferences this can have the same size as
80  loc_src_fmt, or enc_in_fmt, or thumbnail size (e.g. PiP output)
81 
82  dec_in is the incoming RTP bitstream. Negotiated
83  during call establishment, it is not necessarily the same as
84  enc_in_fmt
85 
86  dec_out the output of the decoder.
87  The format is whatever the other side sends, and the
88  buffer is allocated by avcodec_decode_... so we only
89  copy the data here.
90 
91  rem_dpy the format used to display the remote stream
92 
93  src_dpy is the format used to display the local video source streams
94  The number of these fbuf_t is determined at run time, with dynamic allocation
95 
96 We store the format info together with the buffer storing the data.
97 As a future optimization, a format/buffer may reference another one
98 if the formats are equivalent. This will save some unnecessary format
99 conversion.
100 
101 
102 In order to handle video you need to add to sip.conf (and presumably
103 iax.conf too) the following:
104 
105  [general](+)
106  videosupport=yes
107  allow=h263 ; this or other video formats
108  allow=h263p ; this or other video formats
109 
110  */
111 
/*
 * Codecs are absolutely necessary or we cannot do anything.
 * SDL is optional (used for rendering only), so that we can still
 * stream video without displaying it.
 */
117 #if !defined(HAVE_VIDEO_CONSOLE) || !defined(HAVE_FFMPEG)
118 /* stubs if required pieces are missing */
119 int console_write_video(struct ast_channel *chan, struct ast_frame *f)
120 {
121  return 0; /* writing video not supported */
122 }
123 
124 int console_video_cli(struct video_desc *env, const char *var, int fd)
125 {
126  return 1; /* nothing matched */
127 }
128 
129 int console_video_config(struct video_desc **penv, const char *var, const char *val)
130 {
131  return 1; /* no configuration */
132 }
133 
134 void console_video_start(struct video_desc *env, struct ast_channel *owner)
135 {
136  ast_log(LOG_NOTICE, "voice only, console video support not present\n");
137 }
138 
139 void console_video_uninit(struct video_desc *env)
140 {
141 }
142 
143 int get_gui_startup(struct video_desc* env)
144 {
145  return 0; /* no gui here */
146 }
147 
149 
#else /* defined(HAVE_VIDEO_CONSOLE) && defined(HAVE_FFMPEG) */
151 
152 /*! The list of video formats we support. */
154  AST_FORMAT_H263_PLUS | AST_FORMAT_H263 |
155  AST_FORMAT_MP4_VIDEO | AST_FORMAT_H264 | AST_FORMAT_H261 ;
156 
157 
158 
159 /* function to scale and encode buffers */
160 static void my_scale(struct fbuf_t *in, AVPicture *p_in,
161  struct fbuf_t *out, AVPicture *p_out);
162 
163 /*
164  * this structure will be an entry in the table containing
165  * every device specified in the file oss.conf, it contains various infomation
166  * about the device
167  */
168 struct video_device {
169  char *name; /* name of the device */
170  /* allocated dynamically (see fill_table function) */
171  struct grab_desc *grabber; /* the grabber for the device type */
172  void *grabber_data; /* device's private data structure */
173  struct fbuf_t *dev_buf; /* buffer for incoming data */
174  struct timeval last_frame; /* when we read the last frame ? */
175  int status_index; /* what is the status of the device (source) */
176  /* status index is set using the IS_ON, IS_PRIMARY and IS_SECONDARY costants */
177  /* status_index is the index of the status message in the src_msgs array in console_gui.c */
178 };
179 
180 struct video_codec_desc; /* forward declaration */
181 /*
182  * Descriptor of the local source, made of the following pieces:
183  * + configuration info (geometry, device name, fps...). These are read
184  * from the config file and copied here before calling video_out_init();
185  * + the frame buffer (buf) and source pixel format, allocated at init time;
186  * + the encoding and RTP info, including timestamps to generate
187  * frames at the correct rate;
188  * + source-specific info, i.e. fd for /dev/video, dpy-image for x11, etc,
189  * filled in by grabber_open, part of source_specific information are in
190  * the device table (devices member), others are shared;
191  * NOTE: loc_src.data == NULL means the rest of the struct is invalid, and
192  * the video source is not available.
193  */
194 struct video_out_desc {
195  /* video device support.
196  * videodevice and geometry are read from the config file.
197  * At the right time we try to open it and allocate a buffer.
198  * If we are successful, webcam_bufsize > 0 and we can read.
199  */
200  /* all the following is config file info copied from the parent */
201  int fps;
202  int bitrate;
203  int qmin;
204 
205  int sendvideo;
206 
207  struct fbuf_t loc_src_geometry; /* local source geometry only (from config file) */
208  struct fbuf_t enc_out; /* encoder output buffer, allocated in video_out_init() */
209 
210  struct video_codec_desc *enc; /* encoder */
211  void *enc_ctx; /* encoding context */
212  AVCodec *codec;
213  AVFrame *enc_in_frame; /* enc_in mapped into avcodec format. */
214  /* The initial part of AVFrame is an AVPicture */
215  int mtu;
216 
217  /* Table of devices specified with "videodevice=" in oss.conf.
218  * Static size as we have a limited number of entries.
219  */
220  struct video_device devices[MAX_VIDEO_SOURCES];
221  int device_num; /*number of devices in table*/
222  int device_primary; /*index of the actual primary device in the table*/
223  int device_secondary; /*index of the actual secondary device in the table*/
224 
225  int picture_in_picture; /*Is the PiP mode activated? 0 = NO | 1 = YES*/
226 
227  /* these are the coordinates of the picture inside the picture (visible if PiP mode is active)
228  these coordinates are valid considering the containing buffer with cif geometry*/
229  int pip_x;
230  int pip_y;
231 };
232 
233 /*
234  * The overall descriptor, with room for config info, video source and
235  * received data descriptors, SDL info, etc.
236  * This should be globally visible to all modules (grabber, vcodecs, gui)
237  * and contain all configurtion info.
238  */
239 struct video_desc {
240  char codec_name[64]; /* the codec we use */
241 
242  int stayopen; /* set if gui starts manually */
243  pthread_t vthread; /* video thread */
244  ast_mutex_t dec_lock; /* sync decoder and video thread */
245  int shutdown; /* set to shutdown vthread */
246  struct ast_channel *owner; /* owner channel */
247 
248 
249  struct fbuf_t enc_in; /* encoder input buffer, allocated in video_out_init() */
250 
251  char keypad_file[256]; /* image for the keypad */
252  char keypad_font[256]; /* font for the keypad */
253 
254  char sdl_videodriver[256];
255 
256  struct fbuf_t rem_dpy; /* display remote video, no buffer (it is in win[WIN_REMOTE].bmp) */
257  struct fbuf_t loc_dpy; /* display local source, no buffer (managed by SDL in bmp[1]) */
258 
259  /* geometry of the thumbnails for all video sources. */
260  struct fbuf_t src_dpy[MAX_VIDEO_SOURCES]; /* no buffer allocated here */
261 
262  int frame_freeze; /* flag to freeze the incoming frame */
263 
264  /* local information for grabbers, codecs, gui */
265  struct gui_info *gui;
266  struct video_dec_desc *in; /* remote video descriptor */
267  struct video_out_desc out; /* local video descriptor */
268 };
269 
270 static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p);
271 
272 void fbuf_free(struct fbuf_t *b)
273 {
274  struct fbuf_t x = *b;
275 
276  if (b->data && b->size)
277  ast_free(b->data);
278  memset(b, '\0', sizeof(*b));
279  /* restore some fields */
280  b->w = x.w;
281  b->h = x.h;
282  b->pix_fmt = x.pix_fmt;
283 }
284 
285 /* return the status of env->stayopen to chan_oss, as the latter
286  * does not have access to fields of struct video_desc
287  */
288 int get_gui_startup(struct video_desc* env)
289 {
290  return env ? env->stayopen : 0;
291 }
292 
293 #if 0
294 /* helper function to print the amount of memory used by the process.
295  * Useful to track memory leaks, unfortunately this code is OS-specific
296  * so we keep it commented out.
297  */
298 static int
299 used_mem(const char *msg)
300 {
301  char in[128];
302 
303  pid_t pid = getpid();
304  sprintf(in, "ps -o vsz= -o rss= %d", pid);
305  ast_log(LOG_WARNING, "used mem (vsize, rss) %s ", msg);
306  system(in);
307  return 0;
308 }
309 #endif
310 
311 #include "vcodecs.c"
312 #include "console_gui.c"
313 
314 /*! \brief Try to open video sources, return 0 on success, 1 on error
315  * opens all video sources found in the oss.conf configuration files.
316  * Saves the grabber and the datas in the device table (in the devices field
317  * of the descriptor referenced by v).
318  * Initializes the device_primary and device_secondary
319  * fields of v with the first devices that was
320  * successfully opened.
321  *
322  * \param v = video out environment descriptor
323  *
324  * returns 0 on success, 1 on error
325 */
326 static int grabber_open(struct video_out_desc *v)
327 {
328  struct grab_desc *g;
329  void *g_data;
330  int i, j;
331 
332  /* for each device in the device table... */
333  for (i = 0; i < v->device_num; i++) {
334  /* device already open */
335  if (v->devices[i].grabber)
336  continue;
337  /* for each type of grabber supported... */
338  for (j = 0; (g = console_grabbers[j]); j++) {
339  /* the grabber is opened and the informations saved in the device table */
340  g_data = g->open(v->devices[i].name, &v->loc_src_geometry, v->fps);
341  if (!g_data)
342  continue;
343  v->devices[i].grabber = g;
344  v->devices[i].grabber_data = g_data;
345  v->devices[i].status_index |= IS_ON;
346  }
347  }
348  /* the first working device is selected as the primary one and the secondary one */
349  for (i = 0; i < v->device_num; i++) {
350  if (!v->devices[i].grabber)
351  continue;
352  v->device_primary = i;
353  v->device_secondary = i;
354  return 0; /* source found */
355  }
356  return 1; /* no source found */
357 }
358 
359 
360 /*! \brief complete a buffer from the specified local video source.
361  * Called by get_video_frames(), in turn called by the video thread.
362  *
363  * \param dev = video environment descriptor
364  * \param fps = frame per seconds, for every device
365  *
366  * returns:
367  * - NULL on falure
368  * - reference to the device buffer on success
369  */
370 static struct fbuf_t *grabber_read(struct video_device *dev, int fps)
371 {
372  struct timeval now = ast_tvnow();
373 
374  if (dev->grabber == NULL) /* not initialized */
375  return NULL;
376 
377  /* the last_frame field in this row of the device table (dev)
378  is always initialized, it is set during the parsing of the config
379  file, and never unset, function fill_device_table(). */
380  /* check if it is time to read */
381  if (ast_tvdiff_ms(now, dev->last_frame) < 1000/fps)
382  return NULL; /* too early */
383  dev->last_frame = now; /* XXX actually, should correct for drift */
384  return dev->grabber->read(dev->grabber_data);
385 }
386 
387 /*! \brief handler run when dragging with the left button on
388  * the local source window - the effect is to move the offset
389  * of the captured area.
390  */
391 static void grabber_move(struct video_device *dev, int dx, int dy)
392 {
393  if (dev->grabber && dev->grabber->move) {
394  dev->grabber->move(dev->grabber_data, dx, dy);
395  }
396 }
397 
398 /*
399  * Map the codec name to the library. If not recognised, use a default.
400  * This is useful in the output path where we decide by name, presumably.
401  */
402 static struct video_codec_desc *map_config_video_format(char *name)
403 {
404  int i;
405 
406  for (i = 0; supported_codecs[i]; i++)
407  if (!strcasecmp(name, supported_codecs[i]->name))
408  break;
409  if (supported_codecs[i] == NULL) {
410  ast_log(LOG_WARNING, "Cannot find codec for '%s'\n", name);
411  i = 0;
412  strcpy(name, supported_codecs[i]->name);
413  }
414  ast_log(LOG_WARNING, "Using codec '%s'\n", name);
415  return supported_codecs[i];
416 }
417 
418 
419 /*! \brief uninitialize the descriptor for local video stream */
420 static int video_out_uninit(struct video_desc *env)
421 {
422  struct video_out_desc *v = &env->out;
423  int i; /* integer variable used as iterator */
424 
425  /* XXX this should be a codec callback */
426  if (v->enc_ctx) {
427  AVCodecContext *enc_ctx = (AVCodecContext *)v->enc_ctx;
428  avcodec_close(enc_ctx);
429  av_free(enc_ctx);
430  v->enc_ctx = NULL;
431  }
432  if (v->enc_in_frame) {
433  av_free(v->enc_in_frame);
434  v->enc_in_frame = NULL;
435  }
436  v->codec = NULL; /* nothing to free, this is only a reference */
437  /* release the buffers */
438  fbuf_free(&env->enc_in);
439  fbuf_free(&v->enc_out);
440  /* close the grabbers */
441  for (i = 0; i < v->device_num; i++) {
442  if (v->devices[i].grabber){
443  v->devices[i].grabber_data =
444  v->devices[i].grabber->close(v->devices[i].grabber_data);
445  v->devices[i].grabber = NULL;
446  /* dev_buf is already freed by grabber->close() */
447  v->devices[i].dev_buf = NULL;
448  }
449  v->devices[i].status_index = 0;
450  }
451  v->picture_in_picture = 0;
452  env->frame_freeze = 0;
453  return -1;
454 }
455 
456 /*
457  * Initialize the encoder for the local source:
458  * - enc_ctx, codec, enc_in_frame are used by ffmpeg for encoding;
459  * - enc_out is used to store the encoded frame (to be sent)
460  * - mtu is used to determine the max size of video fragment
461  * NOTE: we enter here with the video source already open.
462  */
463 static int video_out_init(struct video_desc *env)
464 {
465  int codec;
466  int size;
467  struct fbuf_t *enc_in;
468  struct video_out_desc *v = &env->out;
469 
470  v->enc_ctx = NULL;
471  v->codec = NULL;
472  v->enc_in_frame = NULL;
473  v->enc_out.data = NULL;
474 
475  codec = map_video_format(v->enc->format, CM_WR);
476  v->codec = avcodec_find_encoder(codec);
477  if (!v->codec) {
478  ast_log(LOG_WARNING, "Cannot find the encoder for format %d\n",
479  codec);
480  return -1; /* error, but nothing to undo yet */
481  }
482 
483  v->mtu = 1400; /* set it early so the encoder can use it */
484 
485  /* allocate the input buffer for encoding.
486  * ffmpeg only supports PIX_FMT_YUV420P for the encoding.
487  */
488  enc_in = &env->enc_in;
489  enc_in->pix_fmt = PIX_FMT_YUV420P;
490  enc_in->size = (enc_in->w * enc_in->h * 3)/2;
491  enc_in->data = ast_calloc(1, enc_in->size);
492  if (!enc_in->data) {
493  ast_log(LOG_WARNING, "Cannot allocate encoder input buffer\n");
494  return video_out_uninit(env);
495  }
496  /* construct an AVFrame that points into buf_in */
497  v->enc_in_frame = avcodec_alloc_frame();
498  if (!v->enc_in_frame) {
499  ast_log(LOG_WARNING, "Unable to allocate the encoding video frame\n");
500  return video_out_uninit(env);
501  }
502 
503  /* parameters for PIX_FMT_YUV420P */
504  size = enc_in->w * enc_in->h;
505  v->enc_in_frame->data[0] = enc_in->data;
506  v->enc_in_frame->data[1] = v->enc_in_frame->data[0] + size;
507  v->enc_in_frame->data[2] = v->enc_in_frame->data[1] + size/4;
508  v->enc_in_frame->linesize[0] = enc_in->w;
509  v->enc_in_frame->linesize[1] = enc_in->w/2;
510  v->enc_in_frame->linesize[2] = enc_in->w/2;
511 
512  /* now setup the parameters for the encoder.
513  * XXX should be codec-specific
514  */
515  {
516  AVCodecContext *enc_ctx = avcodec_alloc_context();
517  v->enc_ctx = enc_ctx;
518  enc_ctx->pix_fmt = enc_in->pix_fmt;
519  enc_ctx->width = enc_in->w;
520  enc_ctx->height = enc_in->h;
521  /* XXX rtp_callback ?
522  * rtp_mode so ffmpeg inserts as many start codes as possible.
523  */
524  enc_ctx->rtp_mode = 1;
525  enc_ctx->rtp_payload_size = v->mtu / 2; // mtu/2
526  enc_ctx->bit_rate = v->bitrate;
527  enc_ctx->bit_rate_tolerance = enc_ctx->bit_rate/2;
528  enc_ctx->qmin = v->qmin; /* should be configured */
529  enc_ctx->time_base = (AVRational){1, v->fps};
530  enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
531 
532  v->enc->enc_init(v->enc_ctx);
533 
534  if (avcodec_open(enc_ctx, v->codec) < 0) {
535  ast_log(LOG_WARNING, "Unable to initialize the encoder %d\n", codec);
536  av_free(enc_ctx);
537  v->enc_ctx = NULL;
538  return video_out_uninit(env);
539  }
540  }
541  /*
542  * Allocate enough for the encoded bitstream. As we are compressing,
543  * we hope that the output is never larger than the input size.
544  */
545  v->enc_out.data = ast_calloc(1, enc_in->size);
546  v->enc_out.size = enc_in->size;
547  v->enc_out.used = 0;
548 
549  return 0;
550 }
551 
552 /*! \brief possibly uninitialize the video console.
553  * Called at the end of a call, should reset the 'owner' field,
554  * then possibly terminate the video thread if the gui has
555  * not been started manually.
556  * In practice, signal the thread and give it a bit of time to
557  * complete, giving up if it gets stuck. Because uninit
558  * is called from hangup with the channel locked, and the thread
559  * uses the chan lock, we need to unlock here. This is unsafe,
560  * and we should really use refcounts for the channels.
561  */
562 void console_video_uninit(struct video_desc *env)
563 {
564  int i, t = 100; /* initial wait is shorter, than make it longer */
565  if (env->stayopen == 0) { /* gui opened by a call, do the shutdown */
566  env->shutdown = 1;
567  for (i=0; env->shutdown && i < 10; i++) {
568  if (env->owner)
569  ast_channel_unlock(env->owner);
570  usleep(t);
571  t = 1000000;
572  if (env->owner)
573  ast_channel_lock(env->owner);
574  }
575  env->vthread = NULL;
576  }
577  env->owner = NULL; /* this is unconditional */
578 }
579 
580 /*! fill an AVPicture from our fbuf info, as it is required by
581  * the image conversion routines in ffmpeg. Note that the pointers
582  * are recalculated if the fbuf has an offset (and so represents a picture in picture)
583  * XXX This depends on the format.
584  */
585 static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p)
586 {
587  /* provide defaults for commonly used formats */
588  int l4 = b->w * b->h/4; /* size of U or V frame */
589  int len = b->w; /* Y linesize, bytes */
590  int luv = b->w/2; /* U/V linesize, bytes */
591  int sample_size = 1;
592 
593  memset(p, '\0', sizeof(*p));
594  switch (b->pix_fmt) {
595  case PIX_FMT_RGB555:
596  case PIX_FMT_RGB565:
597  sample_size = 2;
598  luv = 0;
599  break;
600  case PIX_FMT_RGBA32:
601  sample_size = 4;
602  luv = 0;
603  break;
604  case PIX_FMT_YUYV422: /* Packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr */
605  sample_size = 2; /* all data in first plane, probably */
606  luv = 0;
607  break;
608  }
609  len *= sample_size;
610 
611  p->data[0] = b->data;
612  p->linesize[0] = len;
613  /* these are only valid for component images */
614  p->data[1] = luv ? b->data + 4*l4 : b->data+len;
615  p->data[2] = luv ? b->data + 5*l4 : b->data+len;
616  p->linesize[1] = luv;
617  p->linesize[2] = luv;
618 
619  /* add the offsets to the pointers previously calculated,
620  it is necessary for the picture in picture mode */
621  p->data[0] += len*b->win_y + b->win_x*sample_size;
622  if (luv) {
623  p->data[1] += luv*(b->win_y/2) + (b->win_x/2) * sample_size;
624  p->data[2] += luv*(b->win_y/2) + (b->win_x/2) * sample_size;
625  }
626  return p;
627 }
628 
629 /*! convert/scale between an input and an output format.
630  * Old version of ffmpeg only have img_convert, which does not rescale.
631  * New versions use sws_scale which does both.
632  */
633 static void my_scale(struct fbuf_t *in, AVPicture *p_in,
634  struct fbuf_t *out, AVPicture *p_out)
635 {
636  AVPicture my_p_in, my_p_out;
637  int eff_w=out->w, eff_h=out->h;
638 
639  if (p_in == NULL)
640  p_in = fill_pict(in, &my_p_in);
641  if (p_out == NULL)
642  p_out = fill_pict(out, &my_p_out);
643 
644  /*if win_w is different from zero then we must change
645  the size of the scaled buffer (the position is already
646  encoded into the out parameter)*/
647  if (out->win_w) { /* picture in picture enabled */
648  eff_w=out->win_w;
649  eff_h=out->win_h;
650  }
651 #ifdef OLD_FFMPEG
652  /* XXX img_convert is deprecated, and does not do rescaling, PiP not supported */
653  img_convert(p_out, out->pix_fmt,
654  p_in, in->pix_fmt, in->w, in->h);
655 #else /* XXX replacement */
656  {
657  struct SwsContext *convert_ctx;
658 
659  convert_ctx = sws_getContext(in->w, in->h, in->pix_fmt,
660  eff_w, eff_h, out->pix_fmt,
661  SWS_BICUBIC, NULL, NULL, NULL);
662  if (convert_ctx == NULL) {
663  ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed\n");
664  return;
665  }
666  if (0)
667  ast_log(LOG_WARNING, "in %d %dx%d out %d %dx%d\n",
668  in->pix_fmt, in->w, in->h, out->pix_fmt, eff_w, eff_h);
669  sws_scale(convert_ctx,
670  p_in->data, p_in->linesize,
671  in->w, in->h, /* src slice */
672  p_out->data, p_out->linesize);
673 
674  sws_freeContext(convert_ctx);
675  }
676 #endif /* XXX replacement */
677 }
678 
679 struct video_desc *get_video_desc(struct ast_channel *c);
680 
681 /*
682  * This function is called (by asterisk) for each video packet
683  * coming from the network (the 'in' path) that needs to be processed.
684  * We need to reconstruct the entire video frame before we can decode it.
685  * After a video packet is received we have to:
686  * - extract the bitstream with pre_process_data()
687  * - append the bitstream to a buffer
688  * - if the fragment is the last (RTP Marker) we decode it with decode_video()
689  * - after the decoding is completed we display the decoded frame with show_frame()
690  */
691 int console_write_video(struct ast_channel *chan, struct ast_frame *f);
692 int console_write_video(struct ast_channel *chan, struct ast_frame *f)
693 {
694  struct video_desc *env = get_video_desc(chan);
695  struct video_dec_desc *v = env->in;
696 
697  if (!env->gui) /* no gui, no rendering */
698  return 0;
699  if (v == NULL)
700  env->in = v = dec_init(f->subclass & ~1);
701  if (v == NULL) {
702  /* This is not fatal, but we won't have incoming video */
703  ast_log(LOG_WARNING, "Cannot initialize input decoder\n");
704  return 0;
705  }
706 
707  if (v->dec_in_cur == NULL) /* no buffer for incoming frames, drop */
708  return 0;
709 #if defined(DROP_PACKETS) && DROP_PACKETS > 0
710  /* Simulate lost packets */
711  if ((random() % 10000) <= 100*DROP_PACKETS) {
712  ast_log(LOG_NOTICE, "Packet lost [%d]\n", f->seqno);
713  return 0;
714  }
715 #endif
716  if (v->discard) {
717  /*
718  * In discard mode, drop packets until we find one with
719  * the RTP marker set (which is the end of frame).
720  * Note that the RTP marker flag is sent as the LSB of the
721  * subclass, which is a bitmask of formats. The low bit is
722  * normally used for audio so there is no interference.
723  */
724  if (f->subclass & 0x01) {
725  v->dec_in_cur->used = 0;
726  v->dec_in_cur->ebit = 0;
727  v->next_seq = f->seqno + 1; /* wrap at 16 bit */
728  v->discard = 0;
729  ast_log(LOG_WARNING, "out of discard mode, frame %d\n", f->seqno);
730  }
731  return 0;
732  }
733 
734  /*
735  * Only in-order fragments will be accepted. Remember seqno
736  * has 16 bit so there is wraparound. Also, ideally we could
737  * accept a bit of reordering, but at the moment we don't.
738  */
739  if (v->next_seq != f->seqno) {
740  ast_log(LOG_WARNING, "discarding frame out of order, %d %d\n",
741  v->next_seq, f->seqno);
742  v->discard = 1;
743  return 0;
744  }
745  v->next_seq++;
746 
747  if (f->data.ptr == NULL || f->datalen < 2) {
748  ast_log(LOG_WARNING, "empty video frame, discard\n");
749  return 0;
750  }
751  if (v->d_callbacks->dec_decap(v->dec_in_cur, f->data.ptr, f->datalen)) {
752  ast_log(LOG_WARNING, "error in dec_decap, enter discard\n");
753  v->discard = 1;
754  }
755  if (f->subclass & 0x01) { // RTP Marker
756  /* prepare to decode: advance the buffer so the video thread knows. */
757  struct fbuf_t *tmp = v->dec_in_cur; /* store current pointer */
758  ast_mutex_lock(&env->dec_lock);
759  if (++v->dec_in_cur == &v->dec_in[N_DEC_IN]) /* advance to next, circular */
760  v->dec_in_cur = &v->dec_in[0];
761  if (v->dec_in_dpy == NULL) { /* were not displaying anything, so set it */
762  v->dec_in_dpy = tmp;
763  } else if (v->dec_in_dpy == v->dec_in_cur) { /* current slot is busy */
764  v->dec_in_cur = NULL;
765  }
766  ast_mutex_unlock(&env->dec_lock);
767  }
768  return 0;
769 }
770 
771 
772 /*! \brief refreshes the buffers of all the device by calling the
773  * grabber_read on each device in the device table.
774  * it encodes the primary source buffer, if the picture in picture mode is
775  * enabled it encodes (in the buffer to split) the secondary source buffer too.
776  * The encoded buffer is splitted to build the local and the remote view.
777  * Return a list of ast_frame representing the video fragments.
778  * The head pointer is returned by the function, the tail pointer
779  * is returned as an argument.
780  *
781  * \param env = video environment descriptor
782  * \param tail = tail ponter (pratically a return value)
783  */
784 static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_frame **tail)
785 {
786  struct video_out_desc *v = &env->out;
787  struct ast_frame *dummy;
788  struct fbuf_t *loc_src_primary = NULL, *p_read;
789  int i;
790  /* if no device was found in the config file */
791  if (!env->out.device_num)
792  return NULL;
793  /* every time this function is called we refresh the buffers of every device,
794  updating the private device buffer in the device table */
795  for (i = 0; i < env->out.device_num; i++) {
796  p_read = grabber_read(&env->out.devices[i], env->out.fps);
797  /* it is used only if different from NULL, we mantain last good buffer otherwise */
798  if (p_read)
799  env->out.devices[i].dev_buf = p_read;
800  }
801  /* select the primary device buffer as the one to encode */
802  loc_src_primary = env->out.devices[env->out.device_primary].dev_buf;
803  /* loc_src_primary can be NULL if the device has been turned off during
804  execution of it is read too early */
805  if (loc_src_primary) {
806  /* Scale the video for the encoder, then use it for local rendering
807  so we will see the same as the remote party */
808  my_scale(loc_src_primary, NULL, &env->enc_in, NULL);
809  }
810  if (env->out.picture_in_picture) { /* the picture in picture mode is enabled */
811  struct fbuf_t *loc_src_secondary;
812  /* reads from the secondary source */
813  loc_src_secondary = env->out.devices[env->out.device_secondary].dev_buf;
814  if (loc_src_secondary) {
815  env->enc_in.win_x = env->out.pip_x;
816  env->enc_in.win_y = env->out.pip_y;
817  env->enc_in.win_w = env->enc_in.w/3;
818  env->enc_in.win_h = env->enc_in.h/3;
819  /* scales to the correct geometry and inserts in
820  the enc_in buffer the picture in picture */
821  my_scale(loc_src_secondary, NULL, &env->enc_in, NULL);
822  /* returns to normal parameters (not picture in picture) */
823  env->enc_in.win_x = 0;
824  env->enc_in.win_y = 0;
825  env->enc_in.win_w = 0;
826  env->enc_in.win_h = 0;
827  }
828  else {
829  /* loc_src_secondary can be NULL if the device has been turned off during
830  execution of it is read too early */
831  env->out.picture_in_picture = 0; /* disable picture in picture */
832  }
833  }
834  show_frame(env, WIN_LOCAL); /* local rendering */
835  for (i = 0; i < env->out.device_num; i++)
836  show_frame(env, i+WIN_SRC1); /* rendering of every source device in thumbnails */
837  if (tail == NULL)
838  tail = &dummy;
839  *tail = NULL;
840  /* if no reason for encoding, do not encode */
841  if (!env->owner || !loc_src_primary || !v->sendvideo)
842  return NULL;
843  if (v->enc_out.data == NULL) {
844  static volatile int a = 0;
845  if (a++ < 2)
846  ast_log(LOG_WARNING, "fail, no encoder output buffer\n");
847  return NULL;
848  }
849  v->enc->enc_run(v);
850  return v->enc->enc_encap(&v->enc_out, v->mtu, tail);
851 }
852 
853 /*
854  * Helper thread to periodically poll the video sources and enqueue the
855  * generated frames directed to the remote party to the channel's queue.
856  * Using a separate thread also helps because the encoding can be
857  * computationally expensive so we don't want to starve the main thread.
858  */
859 static void *video_thread(void *arg)
860 {
861  struct video_desc *env = arg;
862  int count = 0;
863  char save_display[128] = "";
864  int i; /* integer variable used as iterator */
865 
866  /* if sdl_videodriver is set, override the environment. Also,
867  * if it contains 'console' override DISPLAY around the call to SDL_Init
868  * so we use the console as opposed to the x11 version of aalib
869  */
870  if (!ast_strlen_zero(env->sdl_videodriver)) { /* override */
871  const char *s = getenv("DISPLAY");
872  setenv("SDL_VIDEODRIVER", env->sdl_videodriver, 1);
873  if (s && !strcasecmp(env->sdl_videodriver, "aalib-console")) {
874  ast_copy_string(save_display, s, sizeof(save_display));
875  unsetenv("DISPLAY");
876  }
877  }
878  sdl_setup(env);
879  if (!ast_strlen_zero(save_display)) {
880  setenv("DISPLAY", save_display, 1);
881  }
882 
883  ast_mutex_init(&env->dec_lock); /* used to sync decoder and renderer */
884 
885  if (grabber_open(&env->out)) {
886  ast_log(LOG_WARNING, "cannot open local video source\n");
887  }
888 
889  if (env->out.device_num) {
890  env->out.devices[env->out.device_primary].status_index |= IS_PRIMARY | IS_SECONDARY;
891  }
892 
893  /* even if no device is connected, we must call video_out_init,
894  * as some of the data structures it initializes are
895  * used in get_video_frames()
896  */
897  video_out_init(env);
898 
899  /* Writes intial status of the sources. */
900  if (env->gui) {
901  for (i = 0; i < env->out.device_num; i++) {
902  print_message(env->gui->thumb_bd_array[i].board,
903  src_msgs[env->out.devices[i].status_index]);
904  }
905  }
906 
907  for (;;) {
908  struct timespec t = { 0, 50000000 }; /* XXX 20 times/sec */
909  struct ast_frame *p, *f;
910  struct ast_channel *chan;
911  int fd;
912  char *caption = NULL, buf[160];
913 
914  /* determine if video format changed */
915  if (count++ % 10 == 0) {
916  if (env->out.sendvideo && env->out.devices) {
917  snprintf(buf, sizeof(buf), "%s %s %dx%d @@ %dfps %dkbps",
918  env->out.devices[env->out.device_primary].name, env->codec_name,
919  env->enc_in.w, env->enc_in.h,
920  env->out.fps, env->out.bitrate / 1000);
921  } else {
922  sprintf(buf, "hold");
923  }
924  caption = buf;
925  }
926 
927  /* manage keypad events */
928  /* XXX here we should always check for events,
929  * otherwise the drag will not work */
930  if (env->gui)
931  eventhandler(env, caption);
932 
933  /* sleep for a while */
934  nanosleep(&t, NULL);
935 
936  if (env->in) {
937  struct video_dec_desc *v = env->in;
938 
939  /*
940  * While there is something to display, call the decoder and free
941  * the buffer, possibly enabling the receiver to store new data.
942  */
943  while (v->dec_in_dpy) {
944  struct fbuf_t *tmp = v->dec_in_dpy; /* store current pointer */
945 
946  /* decode the frame, but show it only if not frozen */
947  if (v->d_callbacks->dec_run(v, tmp) && !env->frame_freeze)
948  show_frame(env, WIN_REMOTE);
949  tmp->used = 0; /* mark buffer as free */
950  tmp->ebit = 0;
951  ast_mutex_lock(&env->dec_lock);
952  if (++v->dec_in_dpy == &v->dec_in[N_DEC_IN]) /* advance to next, circular */
953  v->dec_in_dpy = &v->dec_in[0];
954 
955  if (v->dec_in_cur == NULL) /* receiver was idle, enable it... */
956  v->dec_in_cur = tmp; /* using the slot just freed */
957  else if (v->dec_in_dpy == v->dec_in_cur) /* this was the last slot */
958  v->dec_in_dpy = NULL; /* nothing more to display */
959  ast_mutex_unlock(&env->dec_lock);
960  }
961  }
962 
963  if (env->shutdown)
964  break;
965  f = get_video_frames(env, &p); /* read and display */
966  if (!f)
967  continue;
968  chan = env->owner;
969  if (chan == NULL) {
970  /* drop the chain of frames, nobody uses them */
971  while (f) {
972  struct ast_frame *g = AST_LIST_NEXT(f, frame_list);
973  ast_frfree(f);
974  f = g;
975  }
976  continue;
977  }
978  ast_channel_lock(chan);
979 
980  /* AST_LIST_INSERT_TAIL is only good for one frame, cannot use here */
981  if (ast_channel_readq(chan).first == NULL) {
982  ast_channel_readq(chan).first = f;
983  } else {
985  }
986  ast_channel_readq(chan).last = p;
987  /*
988  * more or less same as ast_queue_frame, but extra
989  * write on the alertpipe to signal frames.
990  */
991  if (ast_channel_alertable(chan)) {
992  for (p = f; p; p = AST_LIST_NEXT(p, frame_list)) {
993  if (ast_channel_alert(chan)) {
994  ast_log(LOG_WARNING, "Unable to write to alert pipe on %s, frametype/subclass %d/%d: %s!\n",
995  ast_channel_name(chan), f->frametype, f->subclass, strerror(errno));
996  }
997  }
998  ast_channel_unlock(chan);
999  }
1000  /* thread terminating, here could call the uninit */
1001  /* uninitialize the local and remote video environments */
1002  env->in = dec_uninit(env->in);
1003  video_out_uninit(env);
1004 
1005  if (env->gui)
1006  env->gui = cleanup_sdl(env->gui, env->out.device_num);
1007  ast_mutex_destroy(&env->dec_lock);
1008  env->shutdown = 0;
1009  return NULL;
1010 }
1011 
1012 static void copy_geometry(struct fbuf_t *src, struct fbuf_t *dst)
1013 {
1014  if (dst->w == 0)
1015  dst->w = src->w;
1016  if (dst->h == 0)
1017  dst->h = src->h;
1018 }
1019 
1020 /*! initialize the video environment.
1021  * Apart from the formats (constant) used by sdl and the codec,
1022  * we use enc_in as the basic geometry.
1023  */
1024 static void init_env(struct video_desc *env)
1025 {
1026  struct fbuf_t *c = &(env->out.loc_src_geometry); /* local source */
1027  struct fbuf_t *ei = &(env->enc_in); /* encoder input */
1028  struct fbuf_t *ld = &(env->loc_dpy); /* local display */
1029  struct fbuf_t *rd = &(env->rem_dpy); /* remote display */
1030  int i; /* integer working as iterator */
1031 
1032  c->pix_fmt = PIX_FMT_YUV420P; /* default - camera format */
1033  ei->pix_fmt = PIX_FMT_YUV420P; /* encoder input */
1034  if (ei->w == 0 || ei->h == 0) {
1035  ei->w = 352;
1036  ei->h = 288;
1037  }
1038  ld->pix_fmt = rd->pix_fmt = PIX_FMT_YUV420P; /* sdl format */
1039  /* inherit defaults */
1040  copy_geometry(ei, c); /* camera inherits from encoder input */
1041  copy_geometry(ei, rd); /* remote display inherits from encoder input */
1042  copy_geometry(rd, ld); /* local display inherits from remote display */
1043 
1044  /* fix the size of buffers for small windows */
1045  for (i = 0; i < env->out.device_num; i++) {
1046  env->src_dpy[i].pix_fmt = PIX_FMT_YUV420P;
1047  env->src_dpy[i].w = SRC_WIN_W;
1048  env->src_dpy[i].h = SRC_WIN_H;
1049  }
1050  /* now we set the default coordinates for the picture in picture
1051  frames inside the env_in buffers, those can be changed by dragging the
1052  picture in picture with left click */
1053  env->out.pip_x = ei->w - ei->w/3;
1054  env->out.pip_y = ei->h - ei->h/3;
1055 }
1056 
1057 /*!
1058  * The first call to the video code, called by oss_new() or similar.
1059  * Here we initialize the various components we use, namely SDL for display,
1060  * ffmpeg for encoding/decoding, and a local video source.
1061  * We do our best to progress even if some of the components are not
1062  * available.
1063  */
1064 void console_video_start(struct video_desc *env, struct ast_channel *owner)
1065 {
1066  ast_log(LOG_WARNING, "env %p chan %p\n", env, owner);
1067  if (env == NULL) /* video not initialized */
1068  return;
1069  env->owner = owner; /* work even if no owner is specified */
1070  if (env->vthread)
1071  return; /* already initialized, nothing to do */
1072  init_env(env);
1073  env->out.enc = map_config_video_format(env->codec_name);
1074 
1075  ast_log(LOG_WARNING, "start video out %s %dx%d\n",
1076  env->codec_name, env->enc_in.w, env->enc_in.h);
1077  /*
1078  * Register all codecs supported by the ffmpeg library.
1079  * We only need to do it once, but probably doesn't
1080  * harm to do it multiple times.
1081  */
1082  avcodec_init();
1083  avcodec_register_all();
1084  av_log_set_level(AV_LOG_ERROR); /* only report errors */
1085 
1086  if (env->out.fps == 0) {
1087  env->out.fps = 15;
1088  ast_log(LOG_WARNING, "fps unset, forcing to %d\n", env->out.fps);
1089  }
1090  if (env->out.bitrate == 0) {
1091  env->out.bitrate = 65000;
1092  ast_log(LOG_WARNING, "bitrate unset, forcing to %d\n", env->out.bitrate);
1093  }
1094  /* create the thread as detached so memory is freed on termination */
1096  NULL, video_thread, env);
1097 }
1098 
/*
 * Parse a geometry string, accepting also common names for the formats.
 * Trick: if we have a leading > or < and a numeric geometry,
 * return the larger or smaller one.
 * E.g. <352x288 gives the smaller one, 320x240
 *
 * \param b destination buffer descriptor; b->w and b->h are set.
 * \param s geometry spec: a format name ("cif"), "WxH", "<WxH" or ">WxH".
 * \return always 0; invalid input falls back to 352x288 with a warning.
 */
static int video_geom(struct fbuf_t *b, const char *s)
{
	int w = 0, h = 0;

	/* well-known formats, sorted by decreasing width; the NULL row is a
	 * sentinel so fp can step over the end of the table safely */
	static struct {
		const char *s; int w; int h;
	} *fp, formats[] = {
		{"16cif", 1408, 1152 },
		{"xga", 1024, 768 },
		{"4cif", 704, 576 },
		{"vga", 640, 480 },
		{"cif", 352, 288 },
		{"qvga", 320, 240 },
		{"qcif", 176, 144 },
		{"sqcif", 128, 96 },
		{NULL, 0, 0 },
	};
	/* with a < or > prefix, parse the numeric geometry that follows */
	if (*s == '<' || *s == '>')
		sscanf(s+1,"%dx%d", &w, &h);
	for (fp = formats; fp->s; fp++) {
		if (*s == '>') { /* look for a larger one */
			/* table is sorted largest-first: stop at the first entry
			 * not larger than the request and step back to the
			 * previous (larger) one when possible */
			if (fp->w <= w) {
				if (fp > formats)
					fp--; /* back one step if possible */
				break;
			}
		} else if (*s == '<') { /* look for a smaller one */
			if (fp->w < w)
				break;
		} else if (!strcasecmp(s, fp->s)) { /* look for a string */
			break;
		}
	}
	/* '<' request smaller than every entry: use the smallest real row,
	 * just before the sentinel */
	if (*s == '<' && fp->s == NULL) /* smallest */
		fp--;
	if (fp->s) {
		b->w = fp->w;
		b->h = fp->h;
	} else if (sscanf(s, "%dx%d", &b->w, &b->h) != 2) {
		/* NOTE(review): a '>' request larger than 16cif reaches this
		 * branch and the leading '>' makes sscanf fail, so it falls to
		 * the 352x288 default — presumably acceptable; confirm. */
		ast_log(LOG_WARNING, "Invalid video_size %s, using 352x288\n", s);
		b->w = 352;
		b->h = 288;
	}
	return 0;
}
1150 
1151 
1152 /*! \brief add an entry to the video_device table,
1153  * ignoring duplicate names.
1154  * The table is a static array of 9 elements.
1155  * The last_frame field of each entry of the table is initialized to
1156  * the current time (we need a value inside this field, on stop of the
1157  * GUI the last_frame value is not changed, to avoid checking if it is 0 we
1158  * set the initial value on current time) XXX
1159  *
1160  * PARAMETERS:
1161  * \param devices_p = pointer to the table of devices
1162  * \param device_num_p = pointer to the number of devices
1163  * \param s = name of the new device to insert
1164  *
1165  * returns 0 on success, 1 on error
1166  */
1167 static int device_table_fill(struct video_device *devices, int *device_num_p, const char *s)
1168 {
1169  int i;
1170  struct video_device *p;
1171 
1172  /* with the current implementation, we support a maximum of 9 devices.*/
1173  if (*device_num_p >= 9)
1174  return 0; /* more devices will be ignored */
1175  /* ignore duplicate names */
1176  for (i = 0; i < *device_num_p; i++) {
1177  if (!strcmp(devices[i].name, s))
1178  return 0;
1179  }
1180  /* inserts the new video device */
1181  p = &devices[*device_num_p];
1182  /* XXX the string is allocated but NEVER deallocated,
1183  the good time to do that is when the module is unloaded, now we skip the problem */
1184  p->name = ast_strdup(s); /* copy the name */
1185  /* other fields initially NULL */
1186  p->grabber = NULL;
1187  p->grabber_data = NULL;
1188  p->dev_buf = NULL;
1189  p->last_frame = ast_tvnow();
1190  p->status_index = 0;
1191  (*device_num_p)++; /* one device added */
1192  return 0;
1193 }
1194 
1195 /* extend ast_cli with video commands. Called by console_video_config */
1196 int console_video_cli(struct video_desc *env, const char *var, int fd)
1197 {
1198  if (env == NULL)
1199  return 1; /* unrecognised */
1200 
1201  if (!strcasecmp(var, "videodevice")) {
1202  ast_cli(fd, "videodevice is [%s]\n", env->out.devices[env->out.device_primary].name);
1203  } else if (!strcasecmp(var, "videocodec")) {
1204  ast_cli(fd, "videocodec is [%s]\n", env->codec_name);
1205  } else if (!strcasecmp(var, "sendvideo")) {
1206  ast_cli(fd, "sendvideo is [%s]\n", env->out.sendvideo ? "on" : "off");
1207  } else if (!strcasecmp(var, "video_size")) {
1208  int in_w = 0, in_h = 0;
1209  if (env->in) {
1210  in_w = env->in->dec_out.w;
1211  in_h = env->in->dec_out.h;
1212  }
1213  ast_cli(fd, "sizes: video %dx%d camera %dx%d local %dx%d remote %dx%d in %dx%d\n",
1214  env->enc_in.w, env->enc_in.h,
1215  env->out.loc_src_geometry.w, env->out.loc_src_geometry.h,
1216  env->loc_dpy.w, env->loc_dpy.h,
1217  env->rem_dpy.w, env->rem_dpy.h,
1218  in_w, in_h);
1219  } else if (!strcasecmp(var, "bitrate")) {
1220  ast_cli(fd, "bitrate is [%d]\n", env->out.bitrate);
1221  } else if (!strcasecmp(var, "qmin")) {
1222  ast_cli(fd, "qmin is [%d]\n", env->out.qmin);
1223  } else if (!strcasecmp(var, "fps")) {
1224  ast_cli(fd, "fps is [%d]\n", env->out.fps);
1225  } else if (!strcasecmp(var, "startgui")) {
1226  env->stayopen = 1;
1227  console_video_start(env, NULL);
1228  } else if (!strcasecmp(var, "stopgui") && env->stayopen != 0) {
1229  env->stayopen = 0;
1230  if (env->gui && env->owner)
1231  ast_cli_command(-1, "console hangup");
1232  else /* not in a call */
1233  console_video_uninit(env);
1234  } else {
1235  return 1; /* unrecognised */
1236  }
1237  return 0; /* recognised */
1238 }
1239 
/*! parse config command for video support.
 * Allocates *penv on first use and applies one var=val pair per call.
 *
 * \param penv address of the video descriptor pointer (allocated here
 *        on first call)
 * \param var configuration variable name
 * \param val configuration variable value
 * \return 0 if the variable was recognised and applied, 1 otherwise
 *         (including allocation errors)
 */
int console_video_config(struct video_desc **penv,
	const char *var, const char *val)
{
	struct video_desc *env;

	if (penv == NULL) {
		ast_log(LOG_WARNING, "bad argument penv=NULL\n");
		return 1;	/* error */
	}
	/* allocate the video descriptor first time we get here */
	env = *penv;
	if (env == NULL) {
		env = *penv = ast_calloc(1, sizeof(struct video_desc));
		if (env == NULL) {
			ast_log(LOG_WARNING, "fail to allocate video_desc\n");
			return 1;	/* error */

		}
		/* set default values - 0's are already there */
		env->out.device_primary = 0;
		env->out.device_secondary = 0;
		env->out.fps = 5;
		env->out.bitrate = 65000;
		env->out.sendvideo = 1;
		env->out.qmin = 3;
		env->out.device_num = 0;
	}
	/* NOTE: the CV_* macros contain hidden control flow: each entry
	 * compares var against its pattern and, on a match, applies the
	 * body and jumps to the CV_END label below. The plain "return 1"
	 * after the last entry is therefore reached only when nothing
	 * matched; the "return 0" after CV_END runs only on a match. */
	CV_START(var, val);
	CV_F("videodevice", device_table_fill(env->out.devices, &env->out.device_num, val));
	CV_BOOL("sendvideo", env->out.sendvideo);
	CV_F("video_size", video_geom(&env->enc_in, val));
	CV_F("camera_size", video_geom(&env->out.loc_src_geometry, val));
	CV_F("local_size", video_geom(&env->loc_dpy, val));
	CV_F("remote_size", video_geom(&env->rem_dpy, val));
	CV_STR("keypad", env->keypad_file);
	CV_F("region", keypad_cfg_read(env->gui, val));
	CV_UINT("startgui", env->stayopen);	/* enable gui at startup */
	CV_STR("keypad_font", env->keypad_font);
	CV_STR("sdl_videodriver", env->sdl_videodriver);
	CV_UINT("fps", env->out.fps);
	CV_UINT("bitrate", env->out.bitrate);
	CV_UINT("qmin", env->out.qmin);
	CV_STR("videocodec", env->codec_name);
	return 1;	/* nothing found */

	CV_END;		/* the 'nothing found' case */
	return 0;		/* found something */
}
1289 
1290 #endif /* video support */
static void sdl_setup(struct video_desc *env)
Definition: console_gui.c:99
#define ast_channel_lock(chan)
Definition: channel.h:2945
Main Channel structure associated with a channel.
int print_message(struct board *b, const char *s)
Asterisk main include file. File version handling, generic pbx functions.
#define N_DEC_IN
Definition: vcodecs.c:92
void *(* open)(const char *name, struct fbuf_t *geom, int fps)
Definition: console_video.h:82
struct ast_frame * first
Definition: channel.h:898
struct ast_frame * last
Definition: channel.h:898
Definition: ast_expr2.c:325
void fbuf_free(struct fbuf_t *)
char buf[BUFSIZE]
Definition: eagi_proxy.c:66
struct fbuf_t dec_in[N_DEC_IN]
Definition: vcodecs.c:95
#define CV_UINT(__x, __dst)
#define LOG_WARNING
Definition: logger.h:274
static void dummy(char *unused,...)
Definition: chan_unistim.c:220
static int tmp()
Definition: bt_open.c:389
#define var
Definition: ast_expr2f.c:614
struct video_codec_desc * d_callbacks
Definition: vcodecs.c:85
struct ast_frame::@264 frame_list
int pix_fmt
Definition: console_video.h:69
#define AST_LIST_NEXT(elm, field)
Returns the next entry in the list after the given entry.
Definition: linkedlists.h:438
int used
Definition: console_video.h:63
void console_video_start(struct video_desc *env, struct ast_channel *owner)
struct timeval ast_tvnow(void)
Returns current timeval. Meant to replace calls to gettimeofday().
Definition: time.h:150
#define ast_mutex_lock(a)
Definition: lock.h:187
static struct test_val c
int64_t ast_tvdiff_ms(struct timeval end, struct timeval start)
Computes the difference (in milliseconds) between two struct timeval instances.
Definition: time.h:98
#define ast_strdup(str)
A wrapper for strdup()
Definition: astmm.h:243
Generic File Format Support. Should be included by clients of the file handling routines. File service providers should instead include mod_format.h.
static struct gui_info * cleanup_sdl(struct gui_info *g, int n)
Definition: console_gui.c:100
#define NULL
Definition: resample.c:96
int console_video_formats
#define CV_END
close a variable parsing block
void ast_cli(int fd, const char *fmt,...)
Definition: clicompat.c:6
struct grab_desc * console_grabbers[]
#define CV_START(__in_var, __in_val)
the macro to open a block for variable parsing
uint16_t next_seq
Definition: vcodecs.c:90
#define ast_pthread_create_detached_background(a, b, c, d)
Definition: utils.h:572
#define CV_STR(__x, __dst)
int console_write_video(struct ast_channel *chan, struct ast_frame *f)
struct ast_frame_subclass subclass
static void show_frame(struct video_desc *env, int out)
Definition: console_gui.c:98
int win_x
Definition: console_video.h:71
#define ast_strlen_zero(foo)
Definition: strings.h:52
static enum CodecID map_video_format(uint32_t ast_format, int rw)
map an asterisk format into an ffmpeg one
Definition: vcodecs.c:1133
int console_video_config(struct video_desc **penv, const char *var, const char *val)
#define CV_F(__pattern, __body)
call a generic function if the name matches.
struct ast_readq_list * ast_channel_readq(struct ast_channel *chan)
int win_h
Definition: console_video.h:74
#define ast_log
Definition: astobj2.c:42
General Asterisk PBX channel definitions.
FILE * in
Definition: utils/frame.c:33
const char * src
int win_y
Definition: console_video.h:72
#define MAX_VIDEO_SOURCES
Definition: console_video.h:51
static struct video_dec_desc * dec_init(uint32_t the_ast_format)
Definition: vcodecs.c:1204
ast_cli_command
calling arguments for new-style handlers.
Definition: cli.h:151
struct fbuf_t * dec_in_dpy
Definition: vcodecs.c:94
struct fbuf_t * dec_in_cur
Definition: vcodecs.c:93
#define LOG_ERROR
Definition: logger.h:285
#define SRC_WIN_H
Definition: console_video.h:47
uint8_t * data
Definition: console_video.h:60
#define CV_BOOL(__x, __dst)
helper macros to assign the value to a BOOL, UINT, static string and dynamic string ...
static int len(struct ast_channel *chan, const char *cmd, char *data, char *buf, size_t buflen)
int errno
Definition: file.c:69
struct sla_ringing_trunk * first
Definition: app_meetme.c:1092
#define LOG_NOTICE
Definition: logger.h:263
int win_w
Definition: console_video.h:73
#define ast_channel_unlock(chan)
Definition: channel.h:2946
static const char name[]
Definition: cdr_mysql.c:74
#define ast_free(a)
Definition: astmm.h:182
#define ast_calloc(num, len)
A wrapper for calloc()
Definition: astmm.h:204
static struct video_dec_desc * dec_uninit(struct video_dec_desc *v)
uninitialize the descriptor for remote video stream
Definition: vcodecs.c:1172
static int keypad_cfg_read(struct gui_info *gui, const char *val)
Definition: console_gui.c:102
int console_video_cli(struct video_desc *env, const char *var, int fd)
int ebit
Definition: console_video.h:64
static void eventhandler(struct video_desc *env, const char *caption)
Definition: console_gui.c:101
#define SRC_WIN_W
Definition: console_video.h:46
void console_video_uninit(struct video_desc *env)
FILE * out
Definition: utils/frame.c:33
Standard Command Line Interface.
int get_gui_startup(struct video_desc *env)
void ast_copy_string(char *dst, const char *src, size_t size)
Size-limited null-terminating string copy.
Definition: strings.h:401
const char * ast_channel_name(const struct ast_channel *chan)
struct ast_frame * next
#define ast_frfree(fr)
Data structure associated with a single frame of data.
static struct test_val b
int setenv(const char *name, const char *value, int overwrite)
union ast_frame::@263 data
enum ast_frame_type frametype
#define ast_mutex_init(pmutex)
Definition: lock.h:184
#define ast_mutex_destroy(a)
Definition: lock.h:186
static const struct video_codec_desc * supported_codecs[]
Definition: vcodecs.c:1145
decoder_decap_f dec_decap
Definition: vcodecs.c:66
struct video_desc * get_video_desc(struct ast_channel *c)
return the pointer to the video descriptor
Definition: chan_oss.c:319
int size
Definition: console_video.h:62
int unsetenv(const char *name)
Structure for mutex and tracking information.
Definition: lock.h:135
Definition: env.py:1
#define ast_mutex_unlock(a)
Definition: lock.h:188
static struct test_val a
decoder_decode_f dec_run
Definition: vcodecs.c:67