diff --git a/channels/console_gui.c b/channels/console_gui.c
new file mode 100644
index 0000000000000000000000000000000000000000..4405cc70970e2043ef8cbf64180bdbe11057851a
--- /dev/null
+++ b/channels/console_gui.c
@@ -0,0 +1,880 @@
+/*
+ * GUI for console video.
+ * The routines here are in charge of loading the keypad and handling events.
+ * $Revision$
+ */
+
/*
 * Release all SDL/GUI resources referenced by the video descriptor:
 * the TTF font (when compiled in), each window's YUV overlay, the
 * keypad skin surface, and SDL itself; then clear the window
 * descriptors so the environment can be re-initialized later.
 */
static void cleanup_sdl(struct video_desc *env)
{
	int i;

#ifdef HAVE_SDL_TTF
	/* unload font file */
	if (env->gui.font) {
		TTF_CloseFont(env->gui.font);
		env->gui.font = NULL;
	}

	/* uninitialize SDL_ttf library */
	if ( TTF_WasInit() )
		TTF_Quit();
#endif

	/* uninitialize the SDL environment */
	for (i = 0; i < WIN_MAX; i++) {
		if (env->win[i].bmp)
			SDL_FreeYUVOverlay(env->win[i].bmp);
	}
	if (env->gui.keypad)
		SDL_FreeSurface(env->gui.keypad);
	env->gui.keypad = NULL;
	SDL_Quit();
	env->screen = NULL; /* XXX check reference */
	bzero(env->win, sizeof(env->win));
	/* NOTE(review): assumes dec_in_lock is initialized exactly when
	 * sdl_ok is set — confirm at the initialization site */
	if (env->sdl_ok)
		ast_mutex_destroy(&(env->in.dec_in_lock));
}
+
/*
 * Display video frames (from local or remote stream) using the SDL library.
 * - Set the video mode to use the resolution specified by the codec context
 * - Create a YUV Overlay to copy the frame into it;
 * - After the frame is copied into the overlay, display it
 *
 * The size is taken from the configuration.
 *
 * 'out' is 0 for remote video, 1 for the local video
 */
static void show_frame(struct video_desc *env, int out)
{
	AVPicture *p_in, p_out;
	struct fbuf_t *b_in, *b_out;
	SDL_Overlay *bmp;

	/* nothing to do if the SDL environment never came up */
	if (!env->sdl_ok)
		return;

	if (out == WIN_LOCAL) {	/* webcam/x11 to sdl */
		b_in = &env->out.enc_in;
		b_out = &env->out.loc_dpy;
		p_in = NULL;	/* local path passes no AVPicture */
	} else {
		/* copy input format from the decoding context */
		AVCodecContext *c = env->in.dec_ctx;
		b_in = &env->in.dec_out;
                b_in->pix_fmt = c->pix_fmt;
                b_in->w = c->width;
                b_in->h = c->height;

		b_out = &env->in.rem_dpy;
		p_in = (AVPicture *)env->in.d_frame;
	}
	bmp = env->win[out].bmp;
	SDL_LockYUVOverlay(bmp);
	/* output picture info - this is sdl, YUV420P */
	bzero(&p_out, sizeof(p_out));
	p_out.data[0] = bmp->pixels[0];
	p_out.data[1] = bmp->pixels[1];
	p_out.data[2] = bmp->pixels[2];
	p_out.linesize[0] = bmp->pitches[0];
	p_out.linesize[1] = bmp->pitches[1];
	p_out.linesize[2] = bmp->pitches[2];

	/* convert/scale the input buffer into the overlay */
	my_scale(b_in, p_in, b_out, &p_out);

	/* lock to protect access to Xlib by different threads. */
	SDL_DisplayYUVOverlay(bmp, &env->win[out].rect);
	SDL_UnlockYUVOverlay(bmp);
}
+
+/*
+ * GUI layout, structure and management
+ *
+
+For the GUI we use SDL to create a large surface (env->screen)
+containing three sections: remote video on the left, local video
+on the right, and the keypad with all controls and text windows
+in the center.
+The central section is built using two images: one is the skin,
+the other one is a mask where the sensitive areas of the skin
+are colored in different grayscale levels according to their
+functions. The mapping between colors and function is defined
+in the 'enum pixel_value' below.
+
+Mouse and keyboard events are detected on the whole surface, and
+handled differently according to their location, as follows:
+
+- drags on the local video window are used to move the captured
+  area (in the case of X11 grabber) or the picture-in-picture
+  location (in case of camera included on the X11 grab).
+- clicks on the keypad are mapped to the corresponding key;
+- drags on some keypad areas (sliders etc.) are mapped to the
+  corresponding functions;
+- keystrokes are used as keypad functions, or as text input
+  if we are in text-input mode.
+
+To manage this behavior we use two status variables,
+which define whether keyboard events should be redirected to dialing
+functions or to message-writing functions, and whether mouse events
+should drive keypad functionality or drag the capture device.
+
+Configuration options control the appearance of the gui:
+
+    keypad = /tmp/phone.jpg		; the keypad on the screen
+    keypad_font = /tmp/font.ttf		; the font to use for output
+
+ *
+ */
+
/* enumerate for the pixel value. 0..127 correspond to ascii chars,
 * which are returned directly as dialed digits; the values below
 * identify the special regions of the keypad mask image and the
 * simulated areas outside the keypad. */
enum pixel_value {
	/* answer/close functions */
	KEY_PICK_UP = 128,
	KEY_HANG_UP = 129,

	/* other functions */
	KEY_MUTE = 130,
	KEY_AUTOANSWER = 131,
	KEY_SENDVIDEO = 132,
	KEY_LOCALVIDEO = 133,
	KEY_REMOTEVIDEO = 134,
	KEY_WRITEMESSAGE = 135,
	KEY_GUI_CLOSE = 136,		/* close gui */

	/* other areas within the keypad */
	KEY_DIGIT_BACKGROUND = 255,

	/* areas outside the keypad - simulated */
	KEY_OUT_OF_KEYPAD = 251,
	KEY_REM_DPY = 252,	/* the remote video display */
	KEY_LOC_DPY = 253,	/* the local video display */
};
+
+/*
+ * Handlers for the various keypad functions
+ */
+
+/*! \brief append a character, or reset if '\0' */
+static void append_char(char *str, int *str_pos, const char c)
+{
+	int i = *str_pos;
+	if (c == '\0')
+		i = 0;
+	else if (i < GUI_BUFFER_LEN - 1)
+		str[i++] = c;
+	else
+		i = GUI_BUFFER_LEN - 1; /* unnecessary, i think */
+	str = '\0';
+	*str_pos = i;
+}
+
/* accumulate digits, possibly call dial if in connected mode.
 * With an active call (env->owner set) the digit is queued on the
 * channel right away as a DTMF frame; with no call it is appended
 * to the dial buffer (env->gui.inbuf) for a later call attempt. */
static void keypad_digit(struct video_desc *env, int digit)
{	
	if (env->owner) {		/* we have a call, send the digit */
		struct ast_frame f = { AST_FRAME_DTMF, 0 };

		f.subclass = digit;
		ast_queue_frame(env->owner, &f);
	} else {		/* no call, accumulate digits */
		append_char(env->gui.inbuf, &env->gui.inbuf_pos, digit);
	}
}
+
+/* this is a wrapper for actions that are available through the cli */
+/* TODO append arg to command and send the resulting string as cli command */
+static void keypad_send_command(struct video_desc *env, char *command)
+{	
+	ast_log(LOG_WARNING, "keypad_send_command(%s) called\n", command);
+	ast_cli_command(env->gui.outfd, command);
+	return;
+}
+
+/* function used to toggle on/off the status of some variables */
+static char *keypad_toggle(struct video_desc *env, int index)
+{
+	ast_log(LOG_WARNING, "keypad_toggle(%i) called\n", index);
+
+	switch (index) {
+	case KEY_SENDVIDEO:
+		env->out.sendvideo = !env->out.sendvideo;
+		break;
+#ifdef notyet
+	case KEY_MUTE: {
+		struct chan_oss_pvt *o = find_desc(oss_active);
+		o->mute = !o->mute;
+		}
+		break;
+	case KEY_AUTOANSWER: {
+		struct chan_oss_pvt *o = find_desc(oss_active);
+		o->autoanswer = !o->autoanswer;
+		}
+		break;
+#endif
+	}
+	return NULL;
+}
+
char *console_do_answer(int fd);
/*
 * Function called when the pick up button is pressed.
 * Performs actions according to the channel status:
 *
 *  - if no one is calling us and no digits were pressed,
 *    the operation has no effect;
 *  - if someone is calling us we answer the call;
 *  - if we have no call in progress and we pressed some
 *    digits, the accumulated buffer is sent as a CLI command.
 *
 * In every case the dial buffer is cleared afterwards.
 */
static void keypad_pick_up(struct video_desc *env)
{
	ast_log(LOG_WARNING, "keypad_pick_up called\n");

	if (env->owner) { /* someone is calling us, just answer */
		console_do_answer(-1);
	} else if (env->gui.inbuf_pos) { /* we have someone to call */
		ast_cli_command(env->gui.outfd, env->gui.inbuf);
	}

	append_char(env->gui.inbuf, &env->gui.inbuf_pos, '\0'); /* clear buffer */
}
+
#if 0 /* still unused */
/*
 * As an alternative to SDL_TTF, we can simply load the font from
 * an image and blit characters on the background of the GUI.
 *
 * To generate a font we can use the 'fly' command with the
 * following script (3 lines with 32 chars each)
 
size 320,64
name font.png
transparent 0,0,0
string 255,255,255,  0, 0,giant, !"#$%&'()*+,-./0123456789:;<=>?
string 255,255,255,  0,20,giant,@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
string 255,255,255,  0,40,giant,`abcdefghijklmnopqrstuvwxyz{|}~
end

 */

/* Print given text on the gui: render it with the loaded TTF font
 * over a freshly-restored keypad skin, then blit to the main screen.
 * Returns 0 on success, 1 on error (or when SDL_ttf is absent). */
static int gui_output(struct video_desc *env, const char *text)
{
#ifndef HAVE_SDL_TTF
	return 1;	/* error, not supported */
#else
	int x = 30, y = 20;	/* XXX change */
	SDL_Surface *output = NULL;
	SDL_Color color = {0, 0, 0};	/* text color */
	SDL_Rect dest = {env->win[WIN_KEYPAD].rect.x + x, y};

	/* clean surface each rewrite */
	SDL_BlitSurface(env->gui.keypad, NULL, env->screen, &env->win[WIN_KEYPAD].rect);

	output = TTF_RenderText_Solid(env->gui.font, text, color);
	if (output == NULL) {
		ast_log(LOG_WARNING, "Cannot render text on gui - %s\n", TTF_GetError());
		return 1;
	}

	SDL_BlitSurface(output, NULL, env->screen, &dest);
	
	/* NOTE(review): SDL_UpdateRects expects the screen surface as its
	 * first argument; env->gui.keypad here looks wrong — confirm before
	 * enabling this disabled code. */
	SDL_UpdateRects(env->gui.keypad, 1, &env->win[WIN_KEYPAD].rect);
	SDL_FreeSurface(output);
	return 0;	/* success */
#endif
}
#endif 
+
+static int video_geom(struct fbuf_t *b, const char *s);
+static void sdl_setup(struct video_desc *env);
+static int kp_match_area(const struct keypad_entry *e, int x, int y);
+
+/*
+ * Handle SDL_MOUSEBUTTONDOWN type, finding the palette
+ * index value and calling the right callback.
+ *
+ * x, y are referred to the upper left corner of the main SDL window.
+ */
+static void handle_button_event(struct video_desc *env, SDL_MouseButtonEvent button)
+{
+	uint8_t index = KEY_OUT_OF_KEYPAD;	/* the key or region of the display we clicked on */
+
+	/* for each click we come back in normal mode */
+	env->gui.text_mode = 0;
+
+	/* define keypad boundary */
+	if (button.x < env->in.rem_dpy.w)
+		index = KEY_REM_DPY; /* click on remote video */
+	else if (button.x > env->in.rem_dpy.w + env->out.keypad_dpy.w)
+		index = KEY_LOC_DPY; /* click on local video */
+	else if (button.y > env->out.keypad_dpy.h)
+		index = KEY_OUT_OF_KEYPAD; /* click outside the keypad */
+	else if (env->gui.kp) {
+		int i;
+		for (i = 0; i < env->gui.kp_used; i++) {
+			if (kp_match_area(&env->gui.kp[i], button.x - env->in.rem_dpy.w, button.y)) {
+				index = env->gui.kp[i].c;
+				break;
+			}
+		}
+	}
+
+	/* exec the function */
+	if (index < 128) {	/* surely clicked on the keypad, don't care which key */
+		keypad_digit(env, index);
+		return;
+	}
+	switch (index) {
+	/* answer/close function */
+	case KEY_PICK_UP:
+		keypad_pick_up(env);
+		break;
+	case KEY_HANG_UP:
+		keypad_send_command(env, "console hangup");
+		break;
+
+	/* other functions */
+	case KEY_MUTE:
+	case KEY_AUTOANSWER:
+	case KEY_SENDVIDEO:
+		keypad_toggle(env, index);
+		break;
+
+	case KEY_LOCALVIDEO:
+		break;
+	case KEY_REMOTEVIDEO:
+		break;
+	case KEY_WRITEMESSAGE:
+		/* goes in text-mode */
+		env->gui.text_mode = 1;
+		break;
+
+
+	/* press outside the keypad. right increases size, center decreases, left drags */
+	case KEY_LOC_DPY:
+	case KEY_REM_DPY:
+		if (button.button == SDL_BUTTON_LEFT) {
+			if (index == KEY_LOC_DPY) {
+				/* store points where the drag start
+				* and switch in drag mode */
+				env->gui.x_drag = button.x;
+				env->gui.y_drag = button.y;
+				env->gui.drag_mode = 1;
+			}
+			break;
+		} else {
+			char buf[128];
+			struct fbuf_t *fb = index == KEY_LOC_DPY ? &env->out.loc_dpy : &env->in.rem_dpy;
+			sprintf(buf, "%c%dx%d", button.button == SDL_BUTTON_RIGHT ? '>' : '<',
+				fb->w, fb->h);
+			video_geom(fb, buf);
+			sdl_setup(env);
+		}
+		break;
+	case KEY_OUT_OF_KEYPAD:
+		break;
+
+	case KEY_GUI_CLOSE:
+		cleanup_sdl(env);
+		break;
+	case KEY_DIGIT_BACKGROUND:
+		break;
+	default:
+		ast_log(LOG_WARNING, "function not yet defined %i\n", index);
+	}
+}
+
+/*
+ * Handle SDL_KEYDOWN type event, put the key pressed
+ * in the dial buffer or in the text-message buffer,
+ * depending on the text_mode variable value.
+ *
+ * key is the SDLKey structure corresponding to the key pressed.
+ */
+static void handle_keyboard_input(struct video_desc *env, SDLKey key)
+{
+	if (env->gui.text_mode) {
+		/* append in the text-message buffer */
+		if (key == SDLK_RETURN) {
+			/* send the text message and return in normal mode */
+			env->gui.text_mode = 0;
+			keypad_send_command(env, "send text");
+		} else {
+			/* accumulate the key in the message buffer */
+			append_char(env->gui.msgbuf, &env->gui.msgbuf_pos, key);
+		}
+	}
+	else {
+		/* append in the dial buffer */
+		append_char(env->gui.inbuf, &env->gui.inbuf_pos, key);
+	}
+
+	return;
+}
+
/*
 * Clamp a candidate grab coordinate into the visible X screen.
 *
 * x is the new grab value; limit is the largest value allowed.
 * Values at or below zero collapse to 0, values above limit to limit.
 */
static int boundary_checks(int x, int limit)
{
	if (x <= 0)
		return 0;
	if (x > limit)
		return limit;
	return x;
}
+
/* Superlinear acceleration for drag movements: the delta grows by
 * delta^2/100 in the direction of the movement. */
static int move_accel(int delta)
{
	int boost = delta * delta / 100;
	if (delta > 0)
		return delta + boost;
	return delta - boost;
}
+
/*
 * Move the source of the captured video.
 *
 * x_final_drag and y_final_drag are the coordinates where the drag ends,
 * start coordinates are in the gui_info structure.
 */
static void move_capture_source(struct video_desc *env, int x_final_drag, int y_final_drag)
{
	int new_x, new_y;		/* new coordinates for grabbing local video */
	int x = env->out.loc_src.x;	/* old value */
	int y = env->out.loc_src.y;	/* old value */

	/* move the origin, applying acceleration and a 3x gain */
#define POLARITY -1		/* +1 or -1 depending on the desired direction */
	new_x = x + POLARITY*move_accel(x_final_drag - env->gui.x_drag) * 3;
	new_y = y + POLARITY*move_accel(y_final_drag - env->gui.y_drag) * 3;
#undef POLARITY
	env->gui.x_drag = x_final_drag;	/* update origin */
	env->gui.y_drag = y_final_drag;

	/* check boundary and let the source to grab from the new points */
	env->out.loc_src.x = boundary_checks(new_x, env->out.screen_width - env->out.loc_src.w);
	env->out.loc_src.y = boundary_checks(new_y, env->out.screen_height - env->out.loc_src.h);
	return;
}
+
/*
 * I am seeing some kind of deadlock or stall around
 * SDL_PumpEvents() while moving the window on a remote X server
 * (both xfree-4.4.0 and xorg 7.2)
 * and windowmaker. It is unclear what causes it.
 */

/* Grab a bunch of events and dispatch them: keystrokes feed the
 * dial/message buffers, mouse events drive the keypad and the drag
 * of the local capture source. */
static void eventhandler(struct video_desc *env)
{
#define N_EVENTS	32
	int i, n;
	SDL_Event ev[N_EVENTS];

	/* NOTE(review): MY_EV appears unused below — confirm and remove;
	 * SDL event types are plain enum values, not ORable mask bits. */
#define MY_EV (SDL_MOUSEBUTTONDOWN|SDL_KEYDOWN)
	while ( (n = SDL_PeepEvents(ev, N_EVENTS, SDL_GETEVENT, SDL_ALLEVENTS)) > 0) {
		for (i = 0; i < n; i++) {
#if 0
			ast_log(LOG_WARNING, "------ event %d at %d %d\n",
				ev[i].type,  ev[i].button.x,  ev[i].button.y);
#endif
			switch (ev[i].type) {
			case SDL_KEYDOWN:
				handle_keyboard_input(env, ev[i].key.keysym.sym);
				break;
			case SDL_MOUSEMOTION:
				/* motion only matters while dragging the local source */
				if (env->gui.drag_mode != 0)
					move_capture_source(env, ev[i].motion.x, ev[i].motion.y);
				break;
			case SDL_MOUSEBUTTONDOWN:
				handle_button_event(env, ev[i].button);
				break;
			case SDL_MOUSEBUTTONUP:
				/* button release while dragging terminates the drag */
				if (env->gui.drag_mode != 0) {
					move_capture_source(env, ev[i].button.x, ev[i].button.y);
					env->gui.drag_mode = 0;
				}
				break;
			}

		}
	}
	if (1) {
		/* instrument SDL_PumpEvents and report calls that take more
		 * than a few ms (see the stall note above) */
		struct timeval b, a = ast_tvnow();
		int i;
		//SDL_Lock_EventThread();
		SDL_PumpEvents();
		b = ast_tvnow();
		i = ast_tvdiff_ms(b, a);
		if (i > 3)
			fprintf(stderr, "-------- SDL_PumpEvents took %dms\n", i);
		//SDL_Unlock_EventThread();
	}
}
+
+static SDL_Surface *get_keypad(const char *file)
+{
+	SDL_Surface *temp;
+ 
+#ifdef HAVE_SDL_IMAGE
+	temp = IMG_Load(file);
+#else
+	temp = SDL_LoadBMP(file);
+#endif
+	if (temp == NULL)
+		fprintf(stderr, "Unable to load image %s: %s\n",
+			file, SDL_GetError());
+	return temp;
+}
+
+/* TODO: consistency checks, check for bpp, widht and height */
+/* Init the mask image used to grab the action. */
+static int gui_init(struct video_desc *env)
+{
+	/* initialize keypad status */
+	env->gui.text_mode = 0;
+	env->gui.drag_mode = 0;
+
+	/* initialize grab coordinates */
+	env->out.loc_src.x = 0;
+	env->out.loc_src.y = 0;
+
+	/* initialize keyboard buffer */
+	append_char(env->gui.inbuf, &env->gui.inbuf_pos, '\0');
+	append_char(env->gui.msgbuf, &env->gui.msgbuf_pos, '\0');
+
+#ifdef HAVE_SDL_TTF
+	/* Initialize SDL_ttf library and load font */
+	if (TTF_Init() == -1) {
+		ast_log(LOG_WARNING, "Unable to init SDL_ttf, no output available\n");
+		return -1;
+	}
+
+#define GUI_FONTSIZE 28
+	env->gui.font = TTF_OpenFont( env->keypad_font, GUI_FONTSIZE);
+	if (!env->gui.font) {
+		ast_log(LOG_WARNING, "Unable to load font %s, no output available\n", env->keypad_font);
+		return -1;
+	}
+	ast_log(LOG_WARNING, "Loaded font %s\n", env->keypad_font);
+#endif
+
+	env->gui.outfd = open ("/dev/null", O_WRONLY);	/* discard output, temporary */
+	if ( env->gui.outfd < 0 ) {
+		ast_log(LOG_WARNING, "Unable output fd\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* setup an sdl overlay and associated info, return 0 on success, != 0 on error */
+static int set_win(SDL_Surface *screen, struct display_window *win, int fmt,
+	int w, int h, int x, int y)
+{
+	win->bmp = SDL_CreateYUVOverlay(w, h, fmt, screen);
+	if (win->bmp == NULL)
+		return -1;	/* error */
+	win->rect.x = x;
+	win->rect.y = y;
+	win->rect.w = w;
+	win->rect.h = h;
+	return 0;
+}
+
+static int keypad_cfg_read(struct gui_info *gui, const char *val);
+
+static void keypad_setup(struct video_desc *env)
+{
+	int fd = -1;
+	void *p = NULL;
+	off_t l = 0;
+
+	if (env->gui.keypad)
+		return;
+	env->gui.keypad = get_keypad(env->keypad_file);
+	if (!env->gui.keypad)
+		return;
+
+	env->out.keypad_dpy.w = env->gui.keypad->w;
+	env->out.keypad_dpy.h = env->gui.keypad->h;
+	/*
+	 * If the keypad image has a comment field, try to read
+	 * the button location from there. The block must be
+	 *	keypad_entry = token shape x0 y0 x1 y1 h
+	 *	...
+	 * (basically, lines have the same format as config file entries.
+	 * same as the keypad_entry.
+	 * You can add it to a jpeg file using wrjpgcom
+	 */
+	do { /* only once, in fact */
+		const char region[] = "region";
+		int reg_len = strlen(region);
+		const unsigned char *s, *e;
+
+		fd = open(env->keypad_file, O_RDONLY);
+		if (fd < 0) {
+			ast_log(LOG_WARNING, "fail to open %s\n", env->keypad_file);
+			break;
+		}
+		l = lseek(fd, 0, SEEK_END);
+		if (l <= 0) {
+			ast_log(LOG_WARNING, "fail to lseek %s\n", env->keypad_file);
+			break;
+		}
+		p = mmap(NULL, l, PROT_READ, 0, fd, 0);
+		if (p == NULL) {
+			ast_log(LOG_WARNING, "fail to mmap %s size %ld\n", env->keypad_file, (long)l);
+			break;
+		}
+		e = (const unsigned char *)p + l;
+		for (s = p; s < e - 20 ; s++) {
+			if (!memcmp(s, region, reg_len)) { /* keyword found */
+				/* reset previous entries */
+				keypad_cfg_read(&env->gui, "reset");
+				break;
+			}
+		}
+		for ( ;s < e - 20; s++) {
+			char buf[256];
+			const unsigned char *s1;
+			if (index(" \t\r\n", *s))	/* ignore blanks */
+				continue;
+			if (*s > 127)	/* likely end of comment */
+				break;
+			if (memcmp(s, region, reg_len)) /* keyword not found */
+				break;
+			s += reg_len;
+			l = MIN(sizeof(buf), e - s);
+			ast_copy_string(buf, s, l);
+			s1 = ast_skip_blanks(buf);	/* between token and '=' */
+			if (*s1++ != '=')	/* missing separator */
+				break;
+			if (*s1 == '>')	/* skip => */
+				s1++;
+			keypad_cfg_read(&env->gui, ast_skip_blanks(s1));
+			/* now wait for a newline */
+			s1 = s;
+			while (s1 < e - 20 && !index("\r\n", *s1) && *s1 < 128)
+				s1++;
+			s = s1;
+		}
+	} while (0);
+	if (p)
+		munmap(p, l);
+	if (fd >= 0)
+		close(fd);
+}
+
/* [re]set the main sdl window, useful in case of resize.
 * Builds the whole surface (remote video, keypad, local video),
 * creates the overlays and blits the keypad skin; on any error the
 * SDL state is torn down again via cleanup_sdl(). */
static void sdl_setup(struct video_desc *env)
{
	int dpy_fmt = SDL_IYUV_OVERLAY;	/* YV12 causes flicker in SDL */
	int depth, maxw, maxh;
	const SDL_VideoInfo *info = SDL_GetVideoInfo();
	/* NOTE(review): SDL_GetVideoInfo() may return NULL if SDL video is
	 * not initialized yet — confirm SDL_Init() has run before this. */

	/* We want at least 16bpp to support YUV overlays.
	 * E.g with SDL_VIDEODRIVER = aalib the default is 8
	 */
	depth = info->vfmt->BitsPerPixel;
	if (depth < 16)
		depth = 16;
	/*
	 * initialize the SDL environment. We have one large window
	 * with local and remote video, and a keypad.
	 * At the moment we arrange them statically, as follows:
	 * - on the left, the remote video;
	 * - on the center, the keypad
	 * - on the right, the local video
	 */

	keypad_setup(env);
#define BORDER	5	/* border around our windows */
	maxw = env->in.rem_dpy.w + env->out.loc_dpy.w + env->out.keypad_dpy.w;
	maxh = MAX( MAX(env->in.rem_dpy.h, env->out.loc_dpy.h), env->out.keypad_dpy.h);
	maxw += 4 * BORDER;
	maxh += 2 * BORDER;
	env->screen = SDL_SetVideoMode(maxw, maxh, depth, 0);
	if (!env->screen) {
		ast_log(LOG_ERROR, "SDL: could not set video mode - exiting\n");
		goto no_sdl;
	}

	SDL_WM_SetCaption("Asterisk console Video Output", NULL);
	if (set_win(env->screen, &env->win[WIN_REMOTE], dpy_fmt,
			env->in.rem_dpy.w, env->in.rem_dpy.h, BORDER, BORDER))
		goto no_sdl;
	if (set_win(env->screen, &env->win[WIN_LOCAL], dpy_fmt,
			env->out.loc_dpy.w, env->out.loc_dpy.h,
			3*BORDER+env->in.rem_dpy.w + env->out.keypad_dpy.w, BORDER))
		goto no_sdl;

	/* display the skin, but do not free it as we need it later to
	 * restore text areas and maybe sliders too.
	 */
	if (env->gui.keypad) {
		struct SDL_Rect *dest = &env->win[WIN_KEYPAD].rect;
		dest->x = 2*BORDER + env->in.rem_dpy.w;
		dest->y = BORDER;
		dest->w = env->gui.keypad->w;
		dest->h = env->gui.keypad->h;
		SDL_BlitSurface(env->gui.keypad, NULL, env->screen, dest);
		SDL_UpdateRects(env->screen, 1, dest);
	}
	env->in.dec_in_cur = &env->in.dec_in[0];
	env->in.dec_in_dpy = NULL;	/* nothing to display */
	env->sdl_ok = 1;

no_sdl:
	/* NOTE(review): this assumes sdl_ok was 0 on entry; a failed
	 * re-setup after an earlier success would skip the cleanup. */
	if (env->sdl_ok == 0)	/* free resources in case of errors */
		cleanup_sdl(env);
}
+
+/*
+ * Functions to determine if a point is within a region. Return 1 if success.
+ * First rotate the point, with
+ *	x' =  (x - x0) * cos A + (y - y0) * sin A
+ *	y' = -(x - x0) * sin A + (y - y0) * cos A
+ * where cos A = (x1-x0)/l, sin A = (y1 - y0)/l, and
+ *	l = sqrt( (x1-x0)^2 + (y1-y0)^2
+ * Then determine inclusion by simple comparisons i.e.:
+ *	rectangle: x >= 0 && x < l && y >= 0 && y < h
+ *	ellipse: (x-xc)^2/l^2 + (y-yc)^2/h2 < 1
+ */
+static int kp_match_area(const struct keypad_entry *e, int x, int y)
+{
+	double xp, dx = (e->x1 - e->x0);
+	double yp, dy = (e->y1 - e->y0);
+	double l = sqrt(dx*dx + dy*dy);
+	int ret = 0;
+
+	if (l > 1) { /* large enough */
+		xp = ((x - e->x0)*dx + (y - e->y0)*dy)/l;
+		yp = (-(x - e->x0)*dy + (y - e->y0)*dx)/l;
+		if (e->type == KP_RECT) {
+			ret = (xp >= 0 && xp < l && yp >=0 && yp < l);
+		} else if (e->type == KP_CIRCLE) {
+			dx = xp*xp/(l*l) + yp*yp/(e->h*e->h);
+			ret = (dx < 1);
+		}
+	}
+#if 0
+	ast_log(LOG_WARNING, "result %d [%d] for match %d,%d in type %d p0 %d,%d p1 %d,%d h %d\n",
+		ret, e->c, x, y, e->type, e->x0, e->y0, e->x1, e->y1, e->h);
+#endif
+	return ret;
+}
+
+/*
+ * read a keypad entry line in the format
+ *	reset
+ *	token circle xc yc diameter
+ *	token circle xc yc x1 y1 h	# ellipse, main diameter and height
+ *	token rect x0 y0 x1 y1 h	# rectangle with main side and height
+ * token is the token to be returned, either a character or a symbol
+ * as KEY_* above
+ */
+struct _s_k { const char *s; int k; };
+static struct _s_k gui_key_map[] = {
+	{"PICK_UP",	KEY_PICK_UP },
+	{"PICKUP",	KEY_PICK_UP },
+        {"HANG_UP",	KEY_HANG_UP },
+        {"HANGUP",	KEY_HANG_UP },
+        {"MUTE",	KEY_MUTE },
+        {"AUTOANSWER",	KEY_AUTOANSWER },
+        {"SENDVIDEO",	KEY_SENDVIDEO },
+        {"LOCALVIDEO",	KEY_LOCALVIDEO },
+        {"REMOTEVIDEO",	KEY_REMOTEVIDEO },
+        {"WRITEMESSAGE", KEY_WRITEMESSAGE },
+        {"GUI_CLOSE",	KEY_GUI_CLOSE },
+        {NULL, 0 } };
+
/*
 * Parse one keypad entry line (format documented above) and append
 * the resulting entry to gui->kp, growing the array in chunks of 10.
 * Returns 1 when an entry was stored, 0 on parse or allocation
 * failure (and for the "reset" command, which only clears entries).
 */
static int keypad_cfg_read(struct gui_info *gui, const char *val)
{
	struct keypad_entry e;
	char s1[16], s2[16];
	int i, ret = 0;

	bzero(&e, sizeof(e));
	i = sscanf(val, "%14s %14s %d %d %d %d %d",
                s1, s2, &e.x0, &e.y0, &e.x1, &e.y1, &e.h);

	switch (i) {
	default:
		break;
	case 1:	/* only "reset" is allowed */
		if (strcasecmp(s1, "reset"))	/* invalid */
			break;
		if (gui->kp) {
			gui->kp_used = 0;	/* drop entries, keep the array */
		}
		break;
	case 5: /* token circle xc yc diameter */
		if (strcasecmp(s2, "circle"))	/* invalid */
			break;
		/* rewrite the center+diameter form into the 7-argument
		 * (x0 y0 x1 y1 h) form handled by the next case */
		e.h = e.x1;
		e.y1 = e.y0;	/* map radius in x1 y1 */
		e.x1 = e.x0 + e.h;	/* map radius in x1 y1 */
		e.x0 = e.x0 - e.h;	/* map radius in x1 y1 */
		/* fallthrough */

	case 7: /* token circle|rect x0 y0 x1 y1 h */
		if (e.x1 < e.x0 || e.h <= 0) {
			ast_log(LOG_WARNING, "error in coordinates\n");
			e.type = 0;
			break;
		}
		if (!strcasecmp(s2, "circle")) {
			/* for a circle we specify the diameter but store center and radii */
			e.type = KP_CIRCLE;
			e.x0 = (e.x1 + e.x0) / 2;
			e.y0 = (e.y1 + e.y0) / 2;
			e.h = e.h / 2;
		} else if (!strcasecmp(s2, "rect")) {
			e.type = KP_RECT;
		} else
			break;
		ret = 1;	/* geometry parsed successfully */
	}
	// ast_log(LOG_WARNING, "reading [%s] returns %d %d\n", val, i, ret);
	if (ret == 0)
		return 0;
	/* map the string into token to be returned */
	i = atoi(s1);
	if (i > 0 || s1[1] == '\0')	/* numbers or single characters */
		e.c = (i > 9) ? i : s1[0];
	else {
		/* symbolic names resolve through gui_key_map */
		struct _s_k *p;
		for (p = gui_key_map; p->s; p++) {
			if (!strcasecmp(p->s, s1)) {
				e.c = p->k;
				break;
			}
		}
	}
	if (e.c == 0) {
		ast_log(LOG_WARNING, "missing token\n");
		return 0;
	}
	if (gui->kp_size == 0) {
		/* first entry: allocate an initial array of 10 */
		gui->kp = ast_calloc(10, sizeof(e));	/* NOTE(review): log below lacks '\n' */
		if (gui->kp == NULL) {
			ast_log(LOG_WARNING, "cannot allocate kp");
			return 0;
		}
		gui->kp_size = 10;
	}
	if (gui->kp_size == gui->kp_used) { /* must allocate */
		struct keypad_entry *a = ast_realloc(gui->kp, sizeof(e)*(gui->kp_size+10));
		if (a == NULL) {
			ast_log(LOG_WARNING, "cannot reallocate kp");
			return 0;
		}
		gui->kp = a;
		gui->kp_size += 10;
	}
	/* defensive: cannot happen after the grow above */
	if (gui->kp_size == gui->kp_used)
		return 0;
	gui->kp[gui->kp_used++] = e;
	return 1;
}
diff --git a/channels/console_video.c b/channels/console_video.c
index a1a95a8105eee7dd6ed8bff63a6fd676e2014a66..5a2e6fc2926642d00b7d3747cdc1cbcd33df86ae 100644
--- a/channels/console_video.c
+++ b/channels/console_video.c
@@ -111,7 +111,7 @@ int console_video_config(struct video_desc **penv, const char *var, const char *
 
 void console_video_start(struct video_desc *env, struct ast_channel *owner)
 {
-	ast_log(LOG_WARNING, "console video support not present\n");
+	ast_log(LOG_NOTICE, "voice only, console video support not present\n");
 }
 
 void console_video_uninit(struct video_desc *env)
@@ -163,6 +163,9 @@ struct fbuf_t {		/* frame buffers, dynamically allocated */
 	int	pix_fmt;
 };
 
+static void my_scale(struct fbuf_t *in, AVPicture *p_in,
+	struct fbuf_t *out, AVPicture *p_out);
+
 struct video_codec_desc;	/* forward declaration */
 /*
  * Descriptor of the local source, made of the following pieces:
@@ -197,9 +200,10 @@ struct video_out_desc {
 	struct fbuf_t	keypad_dpy;	/* keypad source buffer, XXX */
 
 	struct video_codec_desc *enc;	/* encoder */
-	AVCodecContext	*enc_ctx;	/* encoding context */
+	void		*enc_ctx;	/* encoding context */
 	AVCodec		*codec;
-	AVFrame		*frame;	/* The initial part is an AVPicture */
+	AVFrame		*enc_in_frame;	/* enc_in mapped into avcodec format. */
+					/* The initial part of AVFrame is an AVPicture */
 	int		mtu;
 	struct timeval	last_frame;	/* when we read the last frame ? */
 
@@ -246,41 +250,6 @@ struct video_in_desc {
 	struct fbuf_t rem_dpy;	/* display remote image, no buffer (it is in win[WIN_REMOTE].bmp) */
 };
 
-/*
- * Each codec is defined by a number of callbacks
- */
-/*! \brief initialize the encoder */
-typedef int (*encoder_init_f)(struct video_out_desc *v);
-
-/*! \brief actually call the encoder */
-typedef int (*encoder_encode_f)(struct video_out_desc *v);
-
-/*! \brief encapsulate the bistream in RTP frames */
-typedef struct ast_frame *(*encoder_encap_f)(struct video_out_desc *out,
-		struct ast_frame **tail);
-
-/*! \brief inizialize the decoder */
-typedef int (*decoder_init_f)(struct video_in_desc *v);
-
-/*! \brief extract the bitstream from RTP frames and store in the fbuf.
- * return 0 if ok, 1 on error
- */
-typedef int (*decoder_decap_f)(struct fbuf_t *b, uint8_t *data, int len);
-
-/*! \brief actually call the decoder */
-typedef int (*decoder_decode_f)(struct video_in_desc *v, struct fbuf_t *b);
-
-struct video_codec_desc {
-	const char		*name;		/* format name */
-	int			format;		/* AST_FORMAT_* */
-	encoder_init_f		enc_init;
-	encoder_encap_f		enc_encap;
-	encoder_encode_f	enc_run;
-	decoder_init_f		dec_init;
-	decoder_decap_f		dec_decap;
-	decoder_decode_f	dec_run;
-};
-
 /* our representation of a displayed window. SDL can only do one main
  * window so we map everything within that one
  */
@@ -462,917 +431,8 @@ static struct ast_frame *create_video_frame(uint8_t *start, uint8_t *end,
 	return f;
 }
 
-/* some debugging code to check the bitstream:
- * declare a bit buffer, initialize it, and fetch data from it.
- */
-struct bitbuf {
-	const uint8_t *base;
-	int	bitsize;	/* total size in bits */
-	int	ofs;	/* next bit to read */
-};
-
-static struct bitbuf bitbuf_init(const uint8_t *base, int bitsize, int start_ofs)
-{
-	struct bitbuf a;
-	a.base = base;
-	a.bitsize = bitsize;
-	a.ofs = start_ofs;
-	return a;
-}
-
-static int bitbuf_left(struct bitbuf *b)
-{
-	return b->bitsize - b->ofs;
-}
-
-static uint32_t getbits(struct bitbuf *b, int n)
-{
-	int i, ofs;
-	const uint8_t *d;
-	uint8_t mask;
-	uint32_t retval = 0;
-	if (n> 31) {
-		ast_log(LOG_WARNING, "too many bits %d, max 32\n", n);
-		return 0;
-	}
-	if (n + b->ofs > b->bitsize) {
-		ast_log(LOG_WARNING, "bitbuf overflow %d of %d\n", n + b->ofs, b->bitsize);
-		n = b->bitsize - b->ofs;
-	}
-	ofs = 7 - b->ofs % 8;	/* start from msb */
-	mask = 1 << ofs;
-	d = b->base + b->ofs / 8;	/* current byte */
-	for (i=0 ; i < n; i++) {
-		retval += retval + (*d & mask ? 1 : 0);	/* shift in new byte */
-		b->ofs++;
-		mask >>= 1;
-		if (mask == 0) {
-			d++;
-			mask = 0x80;
-		}
-	}
-	return retval;
-}
-
-static void check_h261(struct fbuf_t *b)
-{
-	struct bitbuf a = bitbuf_init(b->data, b->used * 8, 0);
-	uint32_t x, y;
-	
-	x = getbits(&a, 20);	/* PSC, 0000 0000 0000 0001 0000 */
-	if (x != 0x10) {
-		ast_log(LOG_WARNING, "bad PSC 0x%x\n", x);
-		return;
-	}
-	x = getbits(&a, 5);	/* temporal reference */
-	y = getbits(&a, 6);	/* ptype */
-	if (0)
-	ast_log(LOG_WARNING, "size %d TR %d PTY spl %d doc %d freeze %d %sCIF hi %d\n",
-		b->used,
-		x,
-		(y & 0x20) ? 1 : 0,
-		(y & 0x10) ? 1 : 0,
-		(y & 0x8) ? 1 : 0,
-		(y & 0x4) ? "" : "Q",
-		(y & 0x2) ? 1:0);
-	while ( (x = getbits(&a, 1)) == 1)
-		ast_log(LOG_WARNING, "PSPARE 0x%x\n", getbits(&a, 8));
-	// ast_log(LOG_WARNING, "PSPARE 0 - start GOB LAYER\n");
-	while ( (x = bitbuf_left(&a)) > 0) {
-		// ast_log(LOG_WARNING, "GBSC %d bits left\n", x);
-		x = getbits(&a, 16); /* GBSC 0000 0000 0000 0001 */
-		if (x != 0x1) {
-			ast_log(LOG_WARNING, "bad GBSC 0x%x\n", x);
-			break;
-		}
-		x = getbits(&a, 4);	/* group number */
-		y = getbits(&a, 5);	/* gquant */
-		if (x == 0) {
-			ast_log(LOG_WARNING, "  bad GN %d\n", x);
-			break;
-		}
-		while ( (x = getbits(&a, 1)) == 1)
-			ast_log(LOG_WARNING, "GSPARE 0x%x\n", getbits(&a, 8));
-		while ( (x = bitbuf_left(&a)) > 0) { /* MB layer */
-			break;
-		}
-	}
-}
-
-void dump_buf(struct fbuf_t *b);
-void dump_buf(struct fbuf_t *b)
-{
-	int i, x, last2lines;
-	char buf[80];
-
-	last2lines = (b->used - 16) & ~0xf;
-	ast_log(LOG_WARNING, "buf size %d of %d\n", b->used, b->size);
-	for (i = 0; i < b->used; i++) {
-		x = i & 0xf;
-		if ( x == 0) {	/* new line */
-			if (i != 0)
-				ast_log(LOG_WARNING, "%s\n", buf);
-			bzero(buf, sizeof(buf));
-			sprintf(buf, "%04x: ", i);
-		}
-		sprintf(buf + 6 + x*3, "%02x ", b->data[i]);
-		if (i > 31 && i < last2lines)
-			i = last2lines - 1;
-	}
-	if (buf[0])
-		ast_log(LOG_WARNING, "%s\n", buf);
-}
-/*
- * Here starts the glue code for the various supported video codecs.
- * For each of them, we need to provide routines for initialization,
- * calling the encoder, encapsulating the bitstream in ast_frames,
- * extracting payload from ast_frames, and calling the decoder.
- */
-
-/*--- h263+ support --- */
-
-/*! \brief initialization of h263p */
-static int h263p_enc_init(struct video_out_desc *v)
-{
-	/* modes supported are
-	- Unrestricted Motion Vector (annex D)
-	- Advanced Prediction (annex F)
-	- Advanced Intra Coding (annex I)
-	- Deblocking Filter (annex J)
-	- Slice Structure (annex K)
-	- Alternative Inter VLC (annex S)
-	- Modified Quantization (annex T)
-	*/
-	v->enc_ctx->flags |=CODEC_FLAG_H263P_UMV; /* annex D */
-	v->enc_ctx->flags |=CODEC_FLAG_AC_PRED; /* annex f ? */
-	v->enc_ctx->flags |=CODEC_FLAG_H263P_SLICE_STRUCT; /* annex k */
-	v->enc_ctx->flags |= CODEC_FLAG_H263P_AIC; /* annex I */
-
-	v->enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
-	return 0;
-}
-
-
-/*
- * Create RTP/H.263 fragments to avoid IP fragmentation. We fragment on a
- * PSC or a GBSC, but if we don't find a suitable place just break somewhere.
- * Everything is byte-aligned.
- */
-static struct ast_frame *h263p_encap(struct video_out_desc *out,
-	struct ast_frame **tail)
-{
-	struct ast_frame *cur = NULL, *first = NULL;
-	uint8_t *d = out->enc_out.data;
-	int len = out->enc_out.used;
-	int l = len; /* size of the current fragment. If 0, must look for a psc */
-
-	for (;len > 0; len -= l, d += l) {
-		uint8_t *data;
-		struct ast_frame *f;
-		int i, h;
-
-		if (len >= 3 && d[0] == 0 && d[1] == 0 && d[2] >= 0x80) {
-			/* we are starting a new block, so look for a PSC. */
-			for (i = 3; i < len - 3; i++) {
-				if (d[i] == 0 && d[i+1] == 0 && d[i+2] >= 0x80) {
-					l = i;
-					break;
-				}
-			}
-		}
-		if (l > out->mtu || l > len) { /* psc not found, split */
-			l = MIN(len, out->mtu);
-		}
-		if (l < 1 || l > out->mtu) {
-			ast_log(LOG_WARNING, "--- frame error l %d\n", l);
-			break;
-		}
-		
-		if (d[0] == 0 && d[1] == 0) { /* we start with a psc */
-			h = 0;
-		} else { /* no psc, create a header */
-			h = 2;
-		}
-
-		f = create_video_frame(d, d+l, AST_FORMAT_H263_PLUS, h, cur);
-		if (!f)
-			break;
-
-		data = f->data;
-		if (h == 0) {	/* we start with a psc */
-			data[0] |= 0x04;	// set P == 1, and we are done
-		} else {	/* no psc, create a header */
-			data[0] = data[1] = 0;	// P == 0
-		}
-
-		if (!cur)
-			first = f;
-		cur = f;
-	}
-
-	if (cur)
-		cur->subclass |= 1; // RTP Marker
-
-	*tail = cur;	/* end of the list */
-	return first;
-}
-
-/*! \brief extract the bitstreem from the RTP payload.
- * This is format dependent.
- * For h263+, the format is defined in RFC 2429
- * and basically has a fixed 2-byte header as follows:
- * 5 bits	RR	reserved, shall be 0
- * 1 bit	P	indicate a start/end condition,
- *			in which case the payload should be prepended
- *			by two zero-valued bytes.
- * 1 bit	V	there is an additional VRC header after this header
- * 6 bits	PLEN	length in bytes of extra picture header
- * 3 bits	PEBIT	how many bits to be ignored in the last byte
- *
- * XXX the code below is not complete.
- */
-static int h263p_decap(struct fbuf_t *b, uint8_t *data, int len)
-{
-	int PLEN;
-
-	if (len < 2) {
-		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
-		return 1;
-	}
-	PLEN = ( (data[0] & 1) << 5 ) | ( (data[1] & 0xf8) >> 3);
-
-	if (PLEN > 0) {
-		data += PLEN;
-		len -= PLEN;
-	}
-	if (data[0] & 4)	/* bit P */
-		data[0] = data[1] = 0;
-	else {
-		data += 2;
-		len -= 2;
-	}
-	return fbuf_append(b, data, len, 0, 0);	/* ignore trail bits */
-}
-
-
-/*
- * generic encoder, used by the various protocols supported here.
- * We assume that the buffer is empty at the beginning.
- */
-static int ffmpeg_encode(struct video_out_desc *v)
-{
-	struct fbuf_t *b = &v->enc_out;
-	int i;
-
-	b->used = avcodec_encode_video(v->enc_ctx, b->data, b->size, v->frame);
-	i = avcodec_encode_video(v->enc_ctx, b->data + b->used, b->size - b->used, NULL); /* delayed frames ? */
-	if (i > 0) {
-		ast_log(LOG_WARNING, "have %d more bytes\n", i);
-		b->used += i;
-	}
-	return 0;
-}
-
-/*
- * Generic decoder, which is used by h263p, h263 and h261 as it simply
- * invokes ffmpeg's decoder.
- * av_parser_parse should merge a randomly chopped up stream into
- * proper frames. After that, if we have a valid frame, we decode it
- * until the entire frame is processed.
- */
-static int ffmpeg_decode(struct video_in_desc *v, struct fbuf_t *b)
-{
-	uint8_t *src = b->data;
-	int srclen = b->used;
-	int full_frame = 0;
-
-	if (srclen == 0)	/* no data */
-		return 0;
-	if (0)
-		check_h261(b);
-	// ast_log(LOG_WARNING, "rx size %d\n", srclen);
-	while (srclen) {
-		uint8_t *data;
-		int datalen, ret;
-		int len = av_parser_parse(v->parser, v->dec_ctx, &data, &datalen, src, srclen, 0, 0);
-
-		src += len;
-		srclen -= len;
-		/* The parser might return something it cannot decode, so it skips
-		 * the block returning no data
-		 */
-		if (data == NULL || datalen == 0)
-			continue;
-		ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame, data, datalen);
-		if (full_frame == 1)	/* full frame */
-			break;
-		if (ret < 0) {
-			ast_log(LOG_NOTICE, "Error decoding\n");
-			break;
-		}
-	}
-	if (srclen != 0)	/* update b with leftover data */
-		bcopy(src, b->data, srclen);
-	b->used = srclen;
-	b->ebit = 0;
-	return full_frame;
-}
-
-static struct video_codec_desc h263p_codec = {
-	.name = "h263p",
-	.format = AST_FORMAT_H263_PLUS,
-	.enc_init = h263p_enc_init,
-	.enc_encap = h263p_encap,
-	.enc_run = ffmpeg_encode,
-	.dec_init = NULL,
-	.dec_decap = h263p_decap,
-	.dec_run = ffmpeg_decode
-};
-
-/*--- Plain h263 support --------*/
-
-static int h263_enc_init(struct video_out_desc *v)
-{
-	/* XXX check whether these are supported */
-	v->enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
-	v->enc_ctx->flags |= CODEC_FLAG_H263P_AIC;
-	v->enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
-	v->enc_ctx->flags |= CODEC_FLAG_AC_PRED;
-
-	v->enc_ctx->gop_size = v->fps*5;
-
-	return 0;
-}
-
-/*
- * h263 encapsulation is specified in RFC2190. There are three modes
- * defined (A, B, C), with 4, 8 and 12 bytes of header, respectively.
- * The header is made as follows
- *     0.....................|.......................|.............|....31
- *	F:1 P:1 SBIT:3 EBIT:3 SRC:3 I:1 U:1 S:1 A:1 R:4 DBQ:2 TRB:3 TR:8
- * FP = 0- mode A, (only one word of header)
- * FP = 10 mode B, and also means this is an I or P frame
- * FP = 11 mode C, and also means this is a PB frame.
- * SBIT, EBIT nuber of bits to ignore at beginning (msbits) and end (lsbits)
- * SRC  bits 6,7,8 from the h263 PTYPE field
- * I = 0 intra-coded, 1 = inter-coded (bit 9 from PTYPE)
- * U = 1 for Unrestricted Motion Vector (bit 10 from PTYPE)
- * S = 1 for Syntax Based Arith coding (bit 11 from PTYPE)
- * A = 1 for Advanced Prediction (bit 12 from PTYPE)
- * R = reserved, must be 0
- * DBQ = differential quantization, DBQUANT from h263, 0 unless we are using
- *	PB frames
- * TRB = temporal reference for bframes, also 0 unless this is a PB frame
- * TR = temporal reference for P frames, also 0 unless PB frame.
- *
- * Mode B and mode C description omitted.
- *
- * An RTP frame can start with a PSC 0000 0000 0000 0000 1000 0
- * or with a GBSC, which also has the first 17 bits as a PSC.
- * Note - PSC are byte-aligned, GOB not necessarily. PSC start with
- *	PSC:22 0000 0000 0000 0000 1000 00 	picture start code
- *	TR:8   .... ....			temporal reference
- *      PTYPE:13 or more 			ptype...
- * If we don't fragment a GOB SBIT and EBIT = 0.
- * reference, 8 bit) 
- * 
- * The assumption below is that we start with a PSC.
- */
-static struct ast_frame *h263_encap(struct video_out_desc *out,
-		struct ast_frame **tail)
-{
-	uint8_t *d = out->enc_out.data;
-	int start = 0, i, len = out->enc_out.used;
-	struct ast_frame *f, *cur = NULL, *first = NULL;
-	const int pheader_len = 4;	/* Use RFC-2190 Mode A */
-	uint8_t h263_hdr[12];	/* worst case, room for a type c header */
-	uint8_t *h = h263_hdr;	/* shorthand */
-
-#define H263_MIN_LEN	6
-	if (len < H263_MIN_LEN)	/* unreasonably small */
-		return NULL;
-
-	bzero(h263_hdr, sizeof(h263_hdr));
-	/* Now set the header bytes. Only type A by now,
-	 * and h[0] = h[2] = h[3] = 0 by default.
-	 * PTYPE starts 30 bits in the picture, so the first useful
-	 * bit for us is bit 36 i.e. within d[4] (0 is the msbit).
-	 * SRC = d[4] & 0x1c goes into data[1] & 0xe0
-	 * I   = d[4] & 0x02 goes into data[1] & 0x10
-	 * U   = d[4] & 0x01 goes into data[1] & 0x08
-	 * S   = d[5] & 0x80 goes into data[1] & 0x04
-	 * A   = d[5] & 0x40 goes into data[1] & 0x02
-	 * R   = 0           goes into data[1] & 0x01
-	 * Optimizing it, we have
-	 */
-	h[1] = ( (d[4] & 0x1f) << 3 ) |	/* SRC, I, U */
-		( (d[5] & 0xc0) >> 5 );		/* S, A, R */
-
-	/* now look for the next PSC or GOB header. First try to hit
-	 * a '0' byte then look around for the 0000 0000 0000 0000 1 pattern
-	 * which is both in the PSC and the GBSC.
-	 */
-	for (i = H263_MIN_LEN, start = 0; start < len; start = i, i += 3) {
-		//ast_log(LOG_WARNING, "search at %d of %d/%d\n", i, start, len);
-		for (; i < len ; i++) {
-			uint8_t x, rpos, lpos;
-			int rpos_i;	/* index corresponding to rpos */
-			if (d[i] != 0)		/* cannot be in a GBSC */
-				continue;
-			if (i > len - 1)
-				break;
-			x = d[i+1];
-			if (x == 0)	/* next is equally good */
-				continue;
-			/* see if around us we can make 16 '0' bits for the GBSC.
-			 * Look for the first bit set on the right, and then
-			 * see if we have enough 0 on the left.
-			 * We are guaranteed to end before rpos == 0
-			 */
-			for (rpos = 0x80, rpos_i = 8; rpos; rpos >>= 1, rpos_i--)
-				if (x & rpos)	/* found the '1' bit in GBSC */
-					break;
-			x = d[i-1];		/* now look behind */
-			for (lpos = rpos; lpos ; lpos >>= 1)
-				if (x & lpos)	/* too early, not a GBSC */
-					break;
-			if (lpos)		/* as i said... */
-				continue;
-			/* now we have a GBSC starting somewhere in d[i-1],
-			 * but it might be not byte-aligned
-			 */
-			if (rpos == 0x80) {	/* lucky case */
-				i = i - 1;
-			} else {	/* XXX to be completed */
-				ast_log(LOG_WARNING, "unaligned GBSC 0x%x %d\n",
-					rpos, rpos_i);
-			}
-			break;
-		}
-		/* This frame is up to offset i (not inclusive).
-		 * We do not split it yet even if larger than MTU.
-		 */
-		f = create_video_frame(d + start, d+i, AST_FORMAT_H263,
-				pheader_len, cur);
-
-		if (!f)
-			break;
-		bcopy(h, f->data, 4);	/* copy the h263 header */
-		/* XXX to do: if not aligned, fix sbit and ebit,
-		 * then move i back by 1 for the next frame
-		 */
-		if (!cur)
-			first = f;
-		cur = f;
-	}
-
-	if (cur)
-		cur->subclass |= 1;	// RTP Marker
-
-	*tail = cur;
-	return first;
-}
-
-/* XXX We only drop the header here, but maybe we need more. */
-static int h263_decap(struct fbuf_t *b, uint8_t *data, int len)
-{
-	if (len < 4) {
-		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
-		return 1;	/* error */
-	}
-
-	if ( (data[0] & 0x80) == 0) {
-		len -= 4;
-		data += 4;
-	} else {
-		ast_log(LOG_WARNING, "unsupported mode 0x%x\n",
-			data[0]);
-		return 1;
-	}
-	return fbuf_append(b, data, len, 0, 0);	/* XXX no bit alignment support yet */
-}
-
-static struct video_codec_desc h263_codec = {
-	.name = "h263",
-	.format = AST_FORMAT_H263,
-	.enc_init = h263_enc_init,
-	.enc_encap = h263_encap,
-	.enc_run = ffmpeg_encode,
-	.dec_init = NULL,
-	.dec_decap = h263_decap,
-	.dec_run = ffmpeg_decode
-						
-};
-
-/*---- h261 support -----*/
-static int h261_enc_init(struct video_out_desc *v)
-{
-	/* It is important to set rtp_payload_size = 0, otherwise
-	 * ffmpeg in h261 mode will produce output that it cannot parse.
-	 * Also try to send I frames more frequently than with other codecs.
-	 */
-	v->enc_ctx->rtp_payload_size = 0; /* important - ffmpeg fails otherwise */
-	v->enc_ctx->gop_size = v->fps*2;	/* be more responsive */
-
-	return 0;
-}
-
-/*
- * The encapsulation of H261 is defined in RFC4587 which obsoletes RFC2032
- * The bitstream is preceded by a 32-bit header word:
- *  SBIT:3 EBIT:3 I:1 V:1 GOBN:4 MBAP:5 QUANT:5 HMVD:5 VMVD:5
- * SBIT and EBIT are the bits to be ignored at beginning and end,
- * I=1 if the stream has only INTRA frames - cannot change during the stream.
- * V=0 if motion vector is not used. Cannot change.
- * GOBN is the GOB number in effect at the start of packet, 0 if we
- *	start with a GOB header
- * QUANT is the quantizer in effect, 0 if we start with GOB header
- * HMVD  reference horizontal motion vector. 10000 is forbidden
- * VMVD  reference vertical motion vector, as above.
- * Packetization should occur at GOB boundaries, and if not possible
- * with MacroBlock fragmentation. However it is likely that blocks
- * are not bit-aligned so we must take care of this.
- */
-static struct ast_frame *h261_encap(struct video_out_desc *out,
-		struct ast_frame **tail)
-{
-	uint8_t *d = out->enc_out.data;
-	int start = 0, i, len = out->enc_out.used;
-	struct ast_frame *f, *cur = NULL, *first = NULL;
-	const int pheader_len = 4;
-	uint8_t h261_hdr[4];
-	uint8_t *h = h261_hdr;	/* shorthand */
-	int sbit = 0, ebit = 0;
-
-#define H261_MIN_LEN 10
-	if (len < H261_MIN_LEN)	/* unreasonably small */
-		return NULL;
-
-	bzero(h261_hdr, sizeof(h261_hdr));
-
-	/* Similar to the code in h263_encap, but the marker there is longer.
-	 * Start a few bytes within the bitstream to avoid hitting the marker
-	 * twice. Note we might access the buffer at len, but this is ok because
-	 * the caller has it oversized.
-	 */
-	for (i = H261_MIN_LEN, start = 0; start < len - 1; start = i, i += 4) {
-#if 0	/* test - disable packetization */
-		i = len;	/* wrong... */
-#else
-		int found = 0, found_ebit = 0;	/* last GBSC position found */
-		for (; i < len ; i++) {
-			uint8_t x, rpos, lpos;
-			if (d[i] != 0)		/* cannot be in a GBSC */
-				continue;
-			x = d[i+1];
-			if (x == 0)	/* next is equally good */
-				continue;
-			/* See if around us we find 15 '0' bits for the GBSC.
-			 * Look for the first bit set on the right, and then
-			 * see if we have enough 0 on the left.
-			 * We are guaranteed to end before rpos == 0
-			 */
-			for (rpos = 0x80, ebit = 7; rpos; ebit--, rpos >>= 1)
-				if (x & rpos)	/* found the '1' bit in GBSC */
-					break;
-			x = d[i-1];		/* now look behind */
-			for (lpos = (rpos >> 1); lpos ; lpos >>= 1)
-				if (x & lpos)	/* too early, not a GBSC */
-					break;
-			if (lpos)		/* as i said... */
-				continue;
-			/* now we have a GBSC starting somewhere in d[i-1],
-			 * but it might be not byte-aligned. Just remember it.
-			 */
-			if (i - start > out->mtu) /* too large, stop now */
-				break;
-			found_ebit = ebit;
-			found = i;
-			i += 4;	/* continue forward */
-		}
-		if (i >= len) {	/* trim if we went too forward */
-			i = len;
-			ebit = 0;	/* hopefully... should ask the bitstream ? */
-		}
-		if (i - start > out->mtu && found) {
-			/* use the previous GBSC, hope is within the mtu */
-			i = found;
-			ebit = found_ebit;
-		}
-#endif /* test */
-		if (i - start < 4)	/* XXX too short ? */
-			continue;
-		/* This frame is up to offset i (not inclusive).
-		 * We do not split it yet even if larger than MTU.
-		 */
-		f = create_video_frame(d + start, d+i, AST_FORMAT_H261,
-				pheader_len, cur);
-
-		if (!f)
-			break;
-		/* recompute header with I=0, V=1 */
-		h[0] = ( (sbit & 7) << 5 ) | ( (ebit & 7) << 2 ) | 1;
-		bcopy(h, f->data, 4);	/* copy the h261 header */
-		if (ebit)	/* not aligned, restart from previous byte */
-			i--;
-		sbit = (8 - ebit) & 7;
-		ebit = 0;
-		if (!cur)
-			first = f;
-		cur = f;
-	}
-	if (cur)
-		cur->subclass |= 1;	// RTP Marker
-
-	*tail = cur;
-	return first;
-}
-
-/*
- * Pieces might be unaligned so we really need to put them together.
- */
-static int h261_decap(struct fbuf_t *b, uint8_t *data, int len)
-{
-	int ebit, sbit;
-
-	if (len < 8) {
-		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
-		return 1;
-	}
-	sbit = (data[0] >> 5) & 7;
-	ebit = (data[0] >> 2) & 7;
-	len -= 4;
-	data += 4;
-	return fbuf_append(b, data, len, sbit, ebit);
-}
-
-static struct video_codec_desc h261_codec = {
-	.name = "h261",
-	.format = AST_FORMAT_H261,
-	.enc_init = h261_enc_init,
-	.enc_encap = h261_encap,
-	.enc_run = ffmpeg_encode,
-	.dec_init = NULL,
-	.dec_decap = h261_decap,
-	.dec_run = ffmpeg_decode
-};
-
-/* mpeg4 support */
-static int mpeg4_enc_init(struct video_out_desc *v)
-{
-#if 0
-	//v->enc_ctx->flags |= CODEC_FLAG_LOW_DELAY; /*don't use b frames ?*/
-	v->enc_ctx->flags |= CODEC_FLAG_AC_PRED;
-	v->enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
-	v->enc_ctx->flags |= CODEC_FLAG_QPEL;
-	v->enc_ctx->flags |= CODEC_FLAG_4MV;
-	v->enc_ctx->flags |= CODEC_FLAG_GMC;
-	v->enc_ctx->flags |= CODEC_FLAG_LOOP_FILTER;
-	v->enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
-#endif
-	v->enc_ctx->gop_size = v->fps*5;
-	v->enc_ctx->rtp_payload_size = 0; /* important - ffmpeg fails otherwise */
-	return 0;
-}
-
-/* simplistic encapsulation - just split frames in mtu-size units */
-static struct ast_frame *mpeg4_encap(struct  video_out_desc *out,
-	struct ast_frame **tail)
-{
-	struct ast_frame *f, *cur = NULL, *first = NULL;
-	uint8_t *d = out->enc_out.data;
-	uint8_t *end = d+out->enc_out.used;
-	int len;
-
-	for (;d < end; d += len, cur = f) {
-		len = MIN(out->mtu, end-d);
-		f = create_video_frame(d, d+len, AST_FORMAT_MP4_VIDEO, 0, cur);
-		if (!f)
-			break;
-		if (!first)
-			first = f;
-	}
-	if (cur)
-		cur->subclass |= 1;
-	*tail = cur;
-	return first;
-}
-
-static int mpeg4_decap(struct fbuf_t *b, uint8_t *data, int len)
-{
-	return fbuf_append(b, data, len, 0, 0);
-}
-
-static int mpeg4_decode(struct video_in_desc *v, struct fbuf_t *b)
-{
-	int full_frame = 0, datalen = b->used;
-	int ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame,
-		b->data, datalen);
-	if (ret < 0) {
-		ast_log(LOG_NOTICE, "Error decoding\n");
-		ret = datalen; /* assume we used everything. */
-	}
-	datalen -= ret;
-	if (datalen > 0)	/* update b with leftover bytes */
-		bcopy(b->data + ret, b->data, datalen);
-	b->used = datalen;
-	b->ebit = 0;
-	return full_frame;
-}
-
-static struct video_codec_desc mpeg4_codec = {
-	.name = "mpeg4",
-	.format = AST_FORMAT_MP4_VIDEO,
-	.enc_init = mpeg4_enc_init,
-	.enc_encap = mpeg4_encap,
-	.enc_run = ffmpeg_encode,
-	.dec_init = NULL,
-	.dec_decap = mpeg4_decap,
-	.dec_run = mpeg4_decode
-};
-
-static int h264_enc_init(struct video_out_desc *v)
-{
-	v->enc_ctx->flags |= CODEC_FLAG_TRUNCATED;
-	//v->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-	//v->enc_ctx->flags2 |= CODEC_FLAG2_FASTPSKIP;
-	/* TODO: Maybe we need to add some other flags */
-	v->enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
-	v->enc_ctx->rtp_mode = 0;
-	v->enc_ctx->rtp_payload_size = 0;
-	v->enc_ctx->bit_rate_tolerance = v->enc_ctx->bit_rate;
-	return 0;
-}
-
-static int h264_dec_init(struct video_in_desc *v)
-{
-	v->dec_ctx->flags |= CODEC_FLAG_TRUNCATED;
-
-	return 0;
-}
-
-/*
- * The structure of a generic H.264 stream is:
- * - 0..n 0-byte(s), unused, optional. one zero-byte is always present
- *   in the first NAL before the start code prefix.
- * - start code prefix (3 bytes): 0x000001
- *   (the first bytestream has a 
- *   like these 0x00000001!)
- * - NAL header byte ( F[1] | NRI[2] | Type[5] ) where type != 0
- * - byte-stream
- * - 0..n 0-byte(s) (padding, unused).
- * Segmentation in RTP only needs to be done on start code prefixes.
- * If fragments are too long... we don't support it yet.
- * - encapsulate (or fragment) the byte-stream (with NAL header included)
- */
-static struct ast_frame *h264_encap(struct video_out_desc *out,
-	struct ast_frame **tail)
-{
-	struct ast_frame *f = NULL, *cur = NULL, *first = NULL;
-	uint8_t *d, *start = out->enc_out.data;
-	uint8_t *end = start + out->enc_out.used;
-
-	/* Search the first start code prefix - ITU-T H.264 sec. B.2,
-	 * and move start right after that, on the NAL header byte.
-	 */
-#define HAVE_NAL(x) (x[-4] == 0 && x[-3] == 0 && x[-2] == 0 && x[-1] == 1)
-	for (start += 4; start < end; start++) {
-		int ty = start[0] & 0x1f;
-		if (HAVE_NAL(start) && ty != 0 && ty != 31)
-			break;
-	}
-	/* if not found, or too short, we just skip the next loop and are done. */
-
-	/* Here follows the main loop to create frames. Search subsequent start
-	 * codes, and then possibly fragment the unit into smaller fragments.
-	 */
-   for (;start < end - 4; start = d) {
-	int size;		/* size of current block */
-	uint8_t hdr[2];		/* add-on header when fragmenting */
-	int ty = 0;
-
-	/* now search next nal */
-	for (d = start + 4; d < end; d++) {
-		ty = d[0] & 0x1f;
-		if (HAVE_NAL(d))
-			break;	/* found NAL */
-	}
-	/* have a block to send. d past the start code unless we overflow */
-	if (d >= end) {	/* NAL not found */
-		d = end + 4;
-	} else if (ty == 0 || ty == 31) { /* found but invalid type, skip */
-		ast_log(LOG_WARNING, "skip invalid nal type %d at %d of %d\n",
-			ty, d - out->enc_out.data, out->enc_out.used);
-		continue;
-	}
-
-	size = d - start - 4;	/* don't count the end */
-
-	if (size < out->mtu) {	// test - don't fragment
-		// Single NAL Unit
-		f = create_video_frame(start, d - 4, AST_FORMAT_H264, 0, cur);
-		if (!f)
-			break;
-		if (!first)
-			first = f;
-
-		cur = f;
-		continue;
-	}
-
-	// Fragmented Unit (Mode A: no DON, very weak)
-	hdr[0] = (*start & 0xe0) | 28;	/* mark as a fragmentation unit */
-	hdr[1] = (*start++ & 0x1f) | 0x80 ;	/* keep type and set START bit */
-	size--;		/* skip the NAL header */
-	while (size) {
-		uint8_t *data;
-		int frag_size = MIN(size, out->mtu);
-
-		f = create_video_frame(start, start+frag_size, AST_FORMAT_H264, 2, cur);
-		if (!f)
-			break;
-		size -= frag_size;	/* skip this data block */
-		start += frag_size;
-
-		data = f->data;
-		data[0] = hdr[0];
-		data[1] = hdr[1] | (size == 0 ? 0x40 : 0);	/* end bit if we are done */
-		hdr[1] &= ~0x80;	/* clear start bit for subsequent frames */
-		if (!first)
-			first = f;
-		cur = f;
-	}
-    }
-
-	if (cur)
-		cur->subclass |= 1;     // RTP Marker
-
-	*tail = cur;
-
-	return first;
-}
-
-static int h264_decap(struct fbuf_t *b, uint8_t *data, int len)
-{
-	/* Start Code Prefix (Annex B in specification) */
-	uint8_t scp[] = { 0x00, 0x00, 0x00, 0x01 };
-	int retval = 0;
-	int type, ofs = 0;
-
-	if (len < 2) {
-		ast_log(LOG_WARNING, "--- invalid len %d\n", len);
-		return 1;
-	}
-	/* first of all, check if the packet has F == 0 */
-	if (data[0] & 0x80) {
-		ast_log(LOG_WARNING, "--- forbidden packet; nal: %02x\n",
-			data[0]);
-		return 1;
-	}
-
-	type = data[0] & 0x1f;
-	switch (type) {
-	case 0:
-	case 31:
-		ast_log(LOG_WARNING, "--- invalid type: %d\n", type);
-		return 1;
-	case 24:
-	case 25:
-	case 26:
-	case 27:
-	case 29:
-		ast_log(LOG_WARNING, "--- encapsulation not supported : %d\n", type);
-		return 1;
-	case 28:	/* FU-A Unit */
-		if (data[1] & 0x80) { // S == 1, import F and NRI from next
-			data[1] &= 0x1f;	/* preserve type */
-			data[1] |= (data[0] & 0xe0);	/* import F & NRI */
-			retval = fbuf_append(b, scp, sizeof(scp), 0, 0);
-			ofs = 1;
-		} else {
-			ofs = 2;
-		}
-		break;
-	default:	/* From 1 to 23 (Single NAL Unit) */
-		retval = fbuf_append(b, scp, sizeof(scp), 0, 0);
-	}
-	if (!retval)
-		retval = fbuf_append(b, data + ofs, len - ofs, 0, 0);
-	if (retval)
-		ast_log(LOG_WARNING, "result %d\n", retval);
-	return retval;
-}
-
-static struct video_codec_desc h264_codec = {
-	.name = "h264",
-	.format = AST_FORMAT_H264,
-	.enc_init = h264_enc_init,
-	.enc_encap = h264_encap,
-	.enc_run = ffmpeg_encode,
-	.dec_init = h264_dec_init,
-	.dec_decap = h264_decap,
-	.dec_run = ffmpeg_decode
-};
+#include "vcodecs.c"
+#include "console_gui.c"
 
 /*------ end codec specific code -----*/
 
@@ -1584,68 +644,6 @@ static int video_read(struct video_out_desc *v)
  * to display the frame.
  */
 
-/*
- * Table of translation between asterisk and ffmpeg formats.
- * We need also a field for read and write (encoding and decoding), because
- * e.g. H263+ uses different codec IDs in ffmpeg when encoding or decoding.
- */
-struct _cm {	/* map ffmpeg codec types to asterisk formats */
-	uint32_t	ast_format;	/* 0 is a terminator */
-	enum CodecID	codec;
-	enum { CM_RD = 1, CM_WR = 2, CM_RDWR = 3 } rw;	/* read or write or both ? */
-	struct video_codec_desc *codec_desc;
-};
-
-static struct _cm video_formats[] = {
-	{ AST_FORMAT_H263_PLUS,	CODEC_ID_H263,  CM_RD }, /* incoming H263P ? */
-	{ AST_FORMAT_H263_PLUS,	CODEC_ID_H263P, CM_WR },
-	{ AST_FORMAT_H263,	CODEC_ID_H263,  CM_RD },
-	{ AST_FORMAT_H263,	CODEC_ID_H263,  CM_WR },
-	{ AST_FORMAT_H261,	CODEC_ID_H261,  CM_RDWR },
-	{ AST_FORMAT_H264,	CODEC_ID_H264,  CM_RDWR },
-	{ AST_FORMAT_MP4_VIDEO,	CODEC_ID_MPEG4, CM_RDWR },
-	{ 0,			0, 0 },
-};
-
-
-/*! \brief map an asterisk format into an ffmpeg one */
-static enum CodecID map_video_format(uint32_t ast_format, int rw)
-{
-	struct _cm *i;
-
-	for (i = video_formats; i->ast_format != 0; i++)
-		if (ast_format & i->ast_format && rw & i->rw && rw & i->rw)
-			return i->codec;
-	return CODEC_ID_NONE;
-}
-
-/* pointers to supported codecs. We assume the first one to be non null. */
-static struct video_codec_desc *supported_codecs[] = {
-	&h263p_codec,
-	&h264_codec,
-	&h263_codec,
-	&h261_codec,
-	&mpeg4_codec,
-	NULL
-};
-
-/*
- * Map the AST_FORMAT to the library. If not recognised, fail.
- * This is useful in the input path where we get frames.
- */
-static struct video_codec_desc *map_video_codec(int fmt)
-{
-	int i;
-
-	for (i = 0; supported_codecs[i]; i++)
-		if (fmt == supported_codecs[i]->format) {
-			ast_log(LOG_WARNING, "using %s for format 0x%x\n",
-				supported_codecs[i]->name, fmt);
-			return supported_codecs[i];
-		}
-	return NULL;
-}
-;
 /*
  * Map the codec name to the library. If not recognised, use a default.
  * This is useful in the output path where we decide by name, presumably.
@@ -1719,6 +717,7 @@ static int video_in_init(struct video_in_desc *v, uint32_t format)
 	* Initialize the codec context.
 	*/
 	v->dec_ctx = avcodec_alloc_context();
+	/* XXX call dec_init() ? */
 	if (avcodec_open(v->dec_ctx, v->codec) < 0) {
 		ast_log(LOG_WARNING, "Cannot open the codec context\n");
 		av_free(v->dec_ctx);
@@ -1743,14 +742,16 @@ static int video_in_init(struct video_in_desc *v, uint32_t format)
 /*! \brief uninitialize the descriptor for local video stream */
 static int video_out_uninit(struct video_out_desc *v)
 {
+	/* XXX this should be a codec callback */
 	if (v->enc_ctx) {
-		avcodec_close(v->enc_ctx);
-		av_free(v->enc_ctx);
+		AVCodecContext *enc_ctx = (AVCodecContext *)v->enc_ctx;
+		avcodec_close(enc_ctx);
+		av_free(enc_ctx);
 		v->enc_ctx = NULL;
 	}
-	if (v->frame) {
-		av_free(v->frame);
-		v->frame = NULL;
+	if (v->enc_in_frame) {
+		av_free(v->enc_in_frame);
+		v->enc_in_frame = NULL;
 	}
 	v->codec = NULL;	/* only a reference */
 	
@@ -1786,7 +787,7 @@ static int video_out_init(struct video_desc *env)
 
 	v->enc_ctx		= NULL;
 	v->codec		= NULL;
-	v->frame		= NULL;
+	v->enc_in_frame		= NULL;
 	v->enc_out.data		= NULL;
 
 	if (v->loc_src.data == NULL) {
@@ -1814,86 +815,61 @@ static int video_out_init(struct video_desc *env)
 		ast_log(LOG_WARNING, "Cannot allocate encoder input buffer\n");
 		return video_out_uninit(v);
 	}
-	v->frame = avcodec_alloc_frame();
-	if (!v->frame) {
+	/* construct an AVFrame that points into buf_in */
+	v->enc_in_frame = avcodec_alloc_frame();
+	if (!v->enc_in_frame) {
 		ast_log(LOG_WARNING, "Unable to allocate the encoding video frame\n");
 		return video_out_uninit(v);
 	}
 
 	/* parameters for PIX_FMT_YUV420P */
 	size = enc_in->w * enc_in->h;
-	v->frame->data[0] = enc_in->data;
-	v->frame->data[1] = v->frame->data[0] + size;
-	v->frame->data[2] = v->frame->data[1] + size/4;
-	v->frame->linesize[0] = enc_in->w;
-	v->frame->linesize[1] = enc_in->w/2;
-	v->frame->linesize[2] = enc_in->w/2;
-
-	/* now setup the parameters for the encoder */
-	v->enc_ctx = avcodec_alloc_context();
-	v->enc_ctx->pix_fmt = enc_in->pix_fmt;
-	v->enc_ctx->width = enc_in->w;
-	v->enc_ctx->height = enc_in->h;
+	v->enc_in_frame->data[0] = enc_in->data;
+	v->enc_in_frame->data[1] = v->enc_in_frame->data[0] + size;
+	v->enc_in_frame->data[2] = v->enc_in_frame->data[1] + size/4;
+	v->enc_in_frame->linesize[0] = enc_in->w;
+	v->enc_in_frame->linesize[1] = enc_in->w/2;
+	v->enc_in_frame->linesize[2] = enc_in->w/2;
+
+	/* now setup the parameters for the encoder.
+	 * XXX should be codec-specific
+	 */
+    {
+	AVCodecContext *enc_ctx = avcodec_alloc_context();
+	v->enc_ctx = enc_ctx;
+	enc_ctx->pix_fmt = enc_in->pix_fmt;
+	enc_ctx->width = enc_in->w;
+	enc_ctx->height = enc_in->h;
 	/* XXX rtp_callback ?
 	 * rtp_mode so ffmpeg inserts as many start codes as possible.
 	 */
-	v->enc_ctx->rtp_mode = 1;
-	v->enc_ctx->rtp_payload_size = v->mtu / 2; // mtu/2
-	v->enc_ctx->bit_rate = v->bitrate;
-	v->enc_ctx->bit_rate_tolerance = v->enc_ctx->bit_rate/2;
-	v->enc_ctx->qmin = v->qmin;	/* should be configured */
-	v->enc_ctx->time_base = (AVRational){1, v->fps};
-
-	v->enc->enc_init(v);
+	enc_ctx->rtp_mode = 1;
+	enc_ctx->rtp_payload_size = v->mtu / 2; // mtu/2
+	enc_ctx->bit_rate = v->bitrate;
+	enc_ctx->bit_rate_tolerance = enc_ctx->bit_rate/2;
+	enc_ctx->qmin = v->qmin;	/* should be configured */
+	enc_ctx->time_base = (AVRational){1, v->fps};
+	enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
+
+	v->enc->enc_init(v->enc_ctx);
  
-	if (avcodec_open(v->enc_ctx, v->codec) < 0) {
+	if (avcodec_open(enc_ctx, v->codec) < 0) {
 		ast_log(LOG_WARNING, "Unable to initialize the encoder %d\n",
 			codec);
-		av_free(v->enc_ctx);
+		av_free(enc_ctx);
 		v->enc_ctx = NULL;
 		return video_out_uninit(v);
 	}
-
+    }
 	/*
 	 * Allocate enough for the encoded bitstream. As we are compressing,
-	 * we hope that the output is never larger than the input size.
-	 */
-	v->enc_out.data = ast_calloc(1, enc_in->size);
-	v->enc_out.size = enc_in->size;
-	v->enc_out.used = 0;
-
-	return 0;
-}
-
-static void cleanup_sdl(struct video_desc *env)  
-{
-	int i;
-
-#ifdef HAVE_SDL_TTF
-	/* unload font file */ 
-	if (env->gui.font) {
-		TTF_CloseFont(env->gui.font);
-		env->gui.font = NULL; 
-	}
-
-	/* uninitialize SDL_ttf library */
-	if ( TTF_WasInit() )
-		TTF_Quit();
-#endif
+	 * we hope that the output is never larger than the input size.
+	 */
+	v->enc_out.data = ast_calloc(1, enc_in->size);
+	v->enc_out.size = enc_in->size;
+	v->enc_out.used = 0;
 
-	/* uninitialize the SDL environment */
-	for (i = 0; i < WIN_MAX; i++) {
-		if (env->win[i].bmp)
-			SDL_FreeYUVOverlay(env->win[i].bmp);
-	}
-	if (env->gui.keypad)
-		SDL_FreeSurface(env->gui.keypad);
-	env->gui.keypad = NULL;
-	SDL_Quit();
-	env->screen = NULL; /* XXX check reference */
-	bzero(env->win, sizeof(env->win));
-	if (env->sdl_ok)
-		ast_mutex_destroy(&(env->in.dec_in_lock));
+	return 0;
 }
 
 /*! \brief uninitialize the entire environment.
@@ -1995,58 +971,6 @@ static void my_scale(struct fbuf_t *in, AVPicture *p_in,
 #endif /* XXX replacement */
 }
 
-/*
- * Display video frames (from local or remote stream) using the SDL library.
- * - Set the video mode to use the resolution specified by the codec context
- * - Create a YUV Overlay to copy the frame into it;
- * - After the frame is copied into the overlay, display it
- *
- * The size is taken from the configuration.
- *
- * 'out' is 0 for remote video, 1 for the local video
- */
-static void show_frame(struct video_desc *env, int out)
-{
-	AVPicture *p_in, p_out;
-	struct fbuf_t *b_in, *b_out;
-	SDL_Overlay *bmp;
-
-	if (!env->sdl_ok)
-		return;
-
-	if (out == WIN_LOCAL) {	/* webcam/x11 to sdl */
-		b_in = &env->out.enc_in;
-		b_out = &env->out.loc_dpy;
-		p_in = NULL;
-	} else {
-		/* copy input format from the decoding context */
-		AVCodecContext *c = env->in.dec_ctx;
-		b_in = &env->in.dec_out;
-                b_in->pix_fmt = c->pix_fmt;
-                b_in->w = c->width;
-                b_in->h = c->height;
-
-		b_out = &env->in.rem_dpy;
-		p_in = (AVPicture *)env->in.d_frame;
-	}
-	bmp = env->win[out].bmp;
-	SDL_LockYUVOverlay(bmp);
-	/* output picture info - this is sdl, YUV420P */
-	bzero(&p_out, sizeof(p_out));
-	p_out.data[0] = bmp->pixels[0];
-	p_out.data[1] = bmp->pixels[1];
-	p_out.data[2] = bmp->pixels[2];
-	p_out.linesize[0] = bmp->pitches[0];
-	p_out.linesize[1] = bmp->pitches[1];
-	p_out.linesize[2] = bmp->pitches[2];
-
-	my_scale(b_in, p_in, b_out, &p_out);
-
-	/* lock to protect access to Xlib by different threads. */
-	SDL_DisplayYUVOverlay(bmp, &env->win[out].rect);
-	SDL_UnlockYUVOverlay(bmp);
-}
-
 struct video_desc *get_video_desc(struct ast_channel *c);
 
 /*
@@ -2188,489 +1112,6 @@ static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_fra
 	return v->enc->enc_encap(v, tail);
 }
 
-/*
- * GUI layout, structure and management
- *
-
-For the GUI we use SDL to create a large surface (env->screen)
-containing tree sections: remote video on the left, local video
-on the right, and the keypad with all controls and text windows
-in the center.
-The central section is built using two images: one is the skin,
-the other one is a mask where the sensitive areas of the skin
-are colored in different grayscale levels according to their
-functions. The mapping between colors and function is defined
-in the 'enum pixel_value' below.
-
-Mouse and keyboard events are detected on the whole surface, and
-handled differently according to their location, as follows:
-
-- drag on the local video window are used to move the captured
-  area (in the case of X11 grabber) or the picture-in-picture
-  location (in case of camera included on the X11 grab).
-- click on the keypad are mapped to the corresponding key;
-- drag on some keypad areas (sliders etc.) are mapped to the
-  corresponding functions;
-- keystrokes are used as keypad functions, or as text input
-  if we are in text-input mode.
-
-To manage these behavior we use two status variables,
-that defines if keyboard events should be redirect to dialing functions
-or to write message functions, and if mouse events should be used
-to implement keypad functionalities or to drag the capture device.
-
-Configuration options control the appeareance of the gui:
-
-    keypad = /tmp/phone.jpg		; the keypad on the screen
-    keypad_font = /tmp/font.ttf		; the font to use for output
-
- *
- */
-
-/* enumerate for the pixel value. 0..127 correspond to ascii chars */
-enum pixel_value {
-	/* answer/close functions */
-	KEY_PICK_UP = 128,
-	KEY_HANG_UP = 129,
-
-	/* other functions */
-	KEY_MUTE = 130,
-	KEY_AUTOANSWER = 131,
-	KEY_SENDVIDEO = 132,
-	KEY_LOCALVIDEO = 133,
-	KEY_REMOTEVIDEO = 134,
-	KEY_WRITEMESSAGE = 135,
-	KEY_GUI_CLOSE = 136,		/* close gui */
-
-	/* other areas within the keypad */
-	KEY_DIGIT_BACKGROUND = 255,
-
-	/* areas outside the keypad - simulated */
-	KEY_OUT_OF_KEYPAD = 251,
-	KEY_REM_DPY = 252,
-	KEY_LOC_DPY = 253,
-};
-
-/*
- * Handlers for the various keypad functions
- */
-
-/*! \brief append a character, or reset if '\0' */
-static void append_char(char *str, int *str_pos, const char c)
-{
-	int i = *str_pos;
-	if (c == '\0')
-		i = 0;
-	else if (i < GUI_BUFFER_LEN - 1)
-		str[i++] = c;
-	else
-		i = GUI_BUFFER_LEN - 1; /* unnecessary, i think */
-	str = '\0';
-	*str_pos = i;
-}
-
-/* accumulate digits, possibly call dial if in connected mode */
-static void keypad_digit(struct video_desc *env, int digit)
-{	
-	if (env->owner) {		/* we have a call, send the digit */
-		struct ast_frame f = { AST_FRAME_DTMF, 0 };
-
-		f.subclass = digit;
-		ast_queue_frame(env->owner, &f);
-	} else {		/* no call, accumulate digits */
-		append_char(env->gui.inbuf, &env->gui.inbuf_pos, digit);
-	}
-}
-
-/* this is a wrapper for actions that are available through the cli */
-/* TODO append arg to command and send the resulting string as cli command */
-static void keypad_send_command(struct video_desc *env, char *command)
-{	
-	ast_log(LOG_WARNING, "keypad_send_command(%s) called\n", command);
-	ast_cli_command(env->gui.outfd, command);
-	return;
-}
-
-/* function used to toggle on/off the status of some variables */
-static char *keypad_toggle(struct video_desc *env, int index)
-{
-	ast_log(LOG_WARNING, "keypad_toggle(%i) called\n", index);
-
-	switch (index) {
-	case KEY_SENDVIDEO:
-		env->out.sendvideo = !env->out.sendvideo;
-		break;
-#ifdef notyet
-	case KEY_MUTE: {
-		struct chan_oss_pvt *o = find_desc(oss_active);
-		o->mute = !o->mute;
-		}
-		break;
-	case KEY_AUTOANSWER: {
-		struct chan_oss_pvt *o = find_desc(oss_active);
-		o->autoanswer = !o->autoanswer;
-		}
-		break;
-#endif
-	}
-	return NULL;
-}
-
-char *console_do_answer(int fd);
-/*
- * Function called when the pick up button is pressed
- * perform actions according the channel status:
- *
- *  - if no one is calling us and no digits was pressed,
- *    the operation have no effects,
- *  - if someone is calling us we answer to the call.
- *  - if we have no call in progress and we pressed some
- *    digit, send the digit to the console.
- */
-static void keypad_pick_up(struct video_desc *env)
-{
-	ast_log(LOG_WARNING, "keypad_pick_up called\n");
-
-	if (env->owner) { /* someone is calling us, just answer */
-		console_do_answer(-1);
-	} else if (env->gui.inbuf_pos) { /* we have someone to call */
-		ast_cli_command(env->gui.outfd, env->gui.inbuf);
-	}
-
-	append_char(env->gui.inbuf, &env->gui.inbuf_pos, '\0'); /* clear buffer */
-}
-
-#if 0 /* still unused */
-/*
- * As an alternative to SDL_TTF, we can simply load the font from
- * an image and blit characters on the background of the GUI.
- *
- * To generate a font we can use the 'fly' command with the
- * following script (3 lines with 32 chars each)
- 
-size 320,64
-name font.png
-transparent 0,0,0
-string 255,255,255,  0, 0,giant, !"#$%&'()*+,-./0123456789:;<=>?
-string 255,255,255,  0,20,giant,@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
-string 255,255,255,  0,40,giant,`abcdefghijklmnopqrstuvwxyz{|}~
-end
-
- */
-
-/* Print given text on the gui */
-static int gui_output(struct video_desc *env, const char *text)
-{
-#ifndef HAVE_SDL_TTF
-	return 1;	/* error, not supported */
-#else
-	int x = 30, y = 20;	/* XXX change */
-	SDL_Surface *output = NULL;
-	SDL_Color color = {0, 0, 0};	/* text color */
-	SDL_Rect dest = {env->win[WIN_KEYPAD].rect.x + x, y};
-
-	/* clean surface each rewrite */
-	SDL_BlitSurface(env->gui.keypad, NULL, env->screen, &env->win[WIN_KEYPAD].rect);
-
-	output = TTF_RenderText_Solid(env->gui.font, text, color);
-	if (output == NULL) {
-		ast_log(LOG_WARNING, "Cannot render text on gui - %s\n", TTF_GetError());
-		return 1;
-	}
-
-	SDL_BlitSurface(output, NULL, env->screen, &dest);
-	
-	SDL_UpdateRects(env->gui.keypad, 1, &env->win[WIN_KEYPAD].rect);
-	SDL_FreeSurface(output);
-	return 0;	/* success */
-#endif
-}
-#endif 
-
-static int video_geom(struct fbuf_t *b, const char *s);
-static void sdl_setup(struct video_desc *env);
-static int kp_match_area(const struct keypad_entry *e, int x, int y);
-
-/*
- * Handle SDL_MOUSEBUTTONDOWN type, finding the palette
- * index value and calling the right callback.
- *
- * x, y are referred to the upper left corner of the main SDL window.
- */
-static void handle_button_event(struct video_desc *env, SDL_MouseButtonEvent button)
-{
-	uint8_t index = KEY_OUT_OF_KEYPAD;	/* the key or region of the display we clicked on */
-
-	/* for each click we come back in normal mode */
-	env->gui.text_mode = 0;
-
-	/* define keypad boundary */
-	if (button.x < env->in.rem_dpy.w)
-		index = KEY_REM_DPY; /* click on remote video */
-	else if (button.x > env->in.rem_dpy.w + env->out.keypad_dpy.w)
-		index = KEY_LOC_DPY; /* click on local video */
-	else if (button.y > env->out.keypad_dpy.h)
-		index = KEY_OUT_OF_KEYPAD; /* click outside the keypad */
-	else if (env->gui.kp) {
-		int i;
-		for (i = 0; i < env->gui.kp_used; i++) {
-			if (kp_match_area(&env->gui.kp[i], button.x - env->in.rem_dpy.w, button.y)) {
-				index = env->gui.kp[i].c;
-				break;
-			}
-		}
-	}
-
-	/* exec the function */
-	if (index < 128) {	/* surely clicked on the keypad, don't care which key */
-		keypad_digit(env, index);
-		return;
-	}
-	switch (index) {
-	/* answer/close function */
-	case KEY_PICK_UP:
-		keypad_pick_up(env);
-		break;
-	case KEY_HANG_UP:
-		keypad_send_command(env, "console hangup");
-		break;
-
-	/* other functions */
-	case KEY_MUTE:
-	case KEY_AUTOANSWER:
-	case KEY_SENDVIDEO:
-		keypad_toggle(env, index);
-		break;
-
-	case KEY_LOCALVIDEO:
-		break;
-	case KEY_REMOTEVIDEO:
-		break;
-	case KEY_WRITEMESSAGE:
-		/* goes in text-mode */
-		env->gui.text_mode = 1;
-		break;
-
-
-	/* press outside the keypad. right increases size, center decreases, left drags */
-	case KEY_LOC_DPY:
-	case KEY_REM_DPY:
-		if (button.button == SDL_BUTTON_LEFT) {
-			if (index == KEY_LOC_DPY) {
-				/* store points where the drag start
-				* and switch in drag mode */
-				env->gui.x_drag = button.x;
-				env->gui.y_drag = button.y;
-				env->gui.drag_mode = 1;
-			}
-			break;
-		} else {
-			char buf[128];
-			struct fbuf_t *fb = index == KEY_LOC_DPY ? &env->out.loc_dpy : &env->in.rem_dpy;
-			sprintf(buf, "%c%dx%d", button.button == SDL_BUTTON_RIGHT ? '>' : '<',
-				fb->w, fb->h);
-			video_geom(fb, buf);
-			sdl_setup(env);
-		}
-		break;
-	case KEY_OUT_OF_KEYPAD:
-		break;
-
-	case KEY_GUI_CLOSE:
-		cleanup_sdl(env);
-		break;
-	case KEY_DIGIT_BACKGROUND:
-		break;
-	default:
-		ast_log(LOG_WARNING, "function not yet defined %i\n", index);
-	}
-}
-
-/*
- * Handle SDL_KEYDOWN type event, put the key pressed
- * in the dial buffer or in the text-message buffer,
- * depending on the text_mode variable value.
- *
- * key is the SDLKey structure corresponding to the key pressed.
- */
-static void handle_keyboard_input(struct video_desc *env, SDLKey key)
-{
-	if (env->gui.text_mode) {
-		/* append in the text-message buffer */
-		if (key == SDLK_RETURN) {
-			/* send the text message and return in normal mode */
-			env->gui.text_mode = 0;
-			keypad_send_command(env, "send text");
-		} else {
-			/* accumulate the key in the message buffer */
-			append_char(env->gui.msgbuf, &env->gui.msgbuf_pos, key);
-		}
-	}
-	else {
-		/* append in the dial buffer */
-		append_char(env->gui.inbuf, &env->gui.inbuf_pos, key);
-	}
-
-	return;
-}
-
-/*
- * Check if the grab point is inside the X screen.
- *
- * x represent the new grab value
- * limit represent the upper value to use
- */
-static int boundary_checks(int x, int limit)
-{
-	return (x <= 0) ? 0 : (x > limit ? limit : x);
-}
-
-/* implement superlinear acceleration on the movement */
-static int move_accel(int delta)
-{
-	int d1 = delta*delta / 100;
-	return (delta > 0) ? delta + d1 : delta - d1;
-}
-
-/*
- * Move the source of the captured video.
- *
- * x_final_drag and y_final_drag are the coordinates where the drag ends,
- * start coordinares are in the gui_info structure.
- */
-static void move_capture_source(struct video_desc *env, int x_final_drag, int y_final_drag)
-{
-	int new_x, new_y;		/* new coordinates for grabbing local video */
-	int x = env->out.loc_src.x;	/* old value */
-	int y = env->out.loc_src.y;	/* old value */
-
-	/* move the origin */
-#define POLARITY -1		/* +1 or -1 depending on the desired direction */
-	new_x = x + POLARITY*move_accel(x_final_drag - env->gui.x_drag) * 3;
-	new_y = y + POLARITY*move_accel(y_final_drag - env->gui.y_drag) * 3;
-#undef POLARITY
-	env->gui.x_drag = x_final_drag;	/* update origin */
-	env->gui.y_drag = y_final_drag;
-
-	/* check boundary and let the source to grab from the new points */
-	env->out.loc_src.x = boundary_checks(new_x, env->out.screen_width - env->out.loc_src.w);
-	env->out.loc_src.y = boundary_checks(new_y, env->out.screen_height - env->out.loc_src.h);
-	return;
-}
-
-/*
- * I am seeing some kind of deadlock or stall around
- * SDL_PumpEvents() while moving the window on a remote X server
- * (both xfree-4.4.0 and xorg 7.2)
- * and windowmaker. It is unclear what causes it.
- */
-
-/* grab a bunch of events */
-static void eventhandler(struct video_desc *env)
-{
-#define N_EVENTS	32
-	int i, n;
-	SDL_Event ev[N_EVENTS];
-
-#define MY_EV (SDL_MOUSEBUTTONDOWN|SDL_KEYDOWN)
-	while ( (n = SDL_PeepEvents(ev, N_EVENTS, SDL_GETEVENT, SDL_ALLEVENTS)) > 0) {
-		for (i = 0; i < n; i++) {
-#if 0
-			ast_log(LOG_WARNING, "------ event %d at %d %d\n",
-				ev[i].type,  ev[i].button.x,  ev[i].button.y);
-#endif
-			switch (ev[i].type) {
-			case SDL_KEYDOWN:
-				handle_keyboard_input(env, ev[i].key.keysym.sym);
-				break;
-			case SDL_MOUSEMOTION:
-				if (env->gui.drag_mode != 0)
-					move_capture_source(env, ev[i].motion.x, ev[i].motion.y);
-				break;
-			case SDL_MOUSEBUTTONDOWN:
-				handle_button_event(env, ev[i].button);
-				break;
-			case SDL_MOUSEBUTTONUP:
-				if (env->gui.drag_mode != 0) {
-					move_capture_source(env, ev[i].button.x, ev[i].button.y);
-					env->gui.drag_mode = 0;
-				}
-				break;
-			}
-
-		}
-	}
-	if (1) {
-		struct timeval b, a = ast_tvnow();
-		int i;
-		//SDL_Lock_EventThread();
-		SDL_PumpEvents();
-		b = ast_tvnow();
-		i = ast_tvdiff_ms(b, a);
-		if (i > 3)
-			fprintf(stderr, "-------- SDL_PumpEvents took %dms\n", i);
-		//SDL_Unlock_EventThread();
-	}
-}
-
-static SDL_Surface *get_keypad(const char *file)
-{
-	SDL_Surface *temp;
- 
-#ifdef HAVE_SDL_IMAGE
-	temp = IMG_Load(file);
-#else
-	temp = SDL_LoadBMP(file);
-#endif
-	if (temp == NULL)
-		fprintf(stderr, "Unable to load image %s: %s\n",
-			file, SDL_GetError());
-	return temp;
-}
-
-/* TODO: consistency checks, check for bpp, widht and height */
-/* Init the mask image used to grab the action. */
-static int gui_init(struct video_desc *env)
-{
-	/* initialize keypad status */
-	env->gui.text_mode = 0;
-	env->gui.drag_mode = 0;
-
-	/* initialize grab coordinates */
-	env->out.loc_src.x = 0;
-	env->out.loc_src.y = 0;
-
-	/* initialize keyboard buffer */
-	append_char(env->gui.inbuf, &env->gui.inbuf_pos, '\0');
-	append_char(env->gui.msgbuf, &env->gui.msgbuf_pos, '\0');
-
-#ifdef HAVE_SDL_TTF
-	/* Initialize SDL_ttf library and load font */
-	if (TTF_Init() == -1) {
-		ast_log(LOG_WARNING, "Unable to init SDL_ttf, no output available\n");
-		return -1;
-	}
-
-#define GUI_FONTSIZE 28
-	env->gui.font = TTF_OpenFont( env->keypad_font, GUI_FONTSIZE);
-	if (!env->gui.font) {
-		ast_log(LOG_WARNING, "Unable to load font %s, no output available\n", env->keypad_font);
-		return -1;
-	}
-	ast_log(LOG_WARNING, "Loaded font %s\n", env->keypad_font);
-#endif
-
-	env->gui.outfd = open ("/dev/null", O_WRONLY);	/* discard output, temporary */
-	if ( env->gui.outfd < 0 ) {
-		ast_log(LOG_WARNING, "Unable output fd\n");
-		return -1;
-	}
-
-	return 0;
-}
-
-static void sdl_setup(struct video_desc *env);
-
 /*
  * Helper thread to periodically poll the video source and enqueue the
  * generated frames to the channel's queue.
@@ -2852,20 +1293,6 @@ static void init_env(struct video_desc *env)
 	copy_geometry(rd, ld);	/* local display inherits from remote display */
 }
 
-/* setup an sdl overlay and associated info, return 0 on success, != 0 on error */
-static int set_win(SDL_Surface *screen, struct display_window *win, int fmt,
-	int w, int h, int x, int y)
-{
-	win->bmp = SDL_CreateYUVOverlay(w, h, fmt, screen);
-	if (win->bmp == NULL)
-		return -1;	/* error */
-	win->rect.x = x;
-	win->rect.y = y;
-	win->rect.w = w;
-	win->rect.h = h;
-	return 0;
-}
-
 /*!
  * The first call to the video code, called by oss_new() or similar.
  * Here we initialize the various components we use, namely SDL for display,
@@ -2906,154 +1333,6 @@ void console_video_start(struct video_desc *env, struct ast_channel *owner)
 	ast_pthread_create_background(&env->vthread, NULL, video_thread, env);
 }
 
-static int keypad_cfg_read(struct gui_info *gui, const char *val);
-
-static void keypad_setup(struct video_desc *env)
-{
-	int fd = -1;
-	void *p = NULL;
-	off_t l = 0;
-
-	if (env->gui.keypad)
-		return;
-	env->gui.keypad = get_keypad(env->keypad_file);
-	if (!env->gui.keypad)
-		return;
-
-	env->out.keypad_dpy.w = env->gui.keypad->w;
-	env->out.keypad_dpy.h = env->gui.keypad->h;
-	/*
-	 * If the keypad image has a comment field, try to read
-	 * the button location from there. The block must be
-	 *	keypad_entry = token shape x0 y0 x1 y1 h
-	 *	...
-	 * (basically, lines have the same format as config file entries.
-	 * same as the keypad_entry.
-	 * You can add it to a jpeg file using wrjpgcom
-	 */
-	do { /* only once, in fact */
-		const char region[] = "region";
-		int reg_len = strlen(region);
-		const unsigned char *s, *e;
-
-		fd = open(env->keypad_file, O_RDONLY);
-		if (fd < 0) {
-			ast_log(LOG_WARNING, "fail to open %s\n", env->keypad_file);
-			break;
-		}
-		l = lseek(fd, 0, SEEK_END);
-		if (l <= 0) {
-			ast_log(LOG_WARNING, "fail to lseek %s\n", env->keypad_file);
-			break;
-		}
-		p = mmap(NULL, l, PROT_READ, 0, fd, 0);
-		if (p == NULL) {
-			ast_log(LOG_WARNING, "fail to mmap %s size %ld\n", env->keypad_file, (long)l);
-			break;
-		}
-		e = (const unsigned char *)p + l;
-		for (s = p; s < e - 20 ; s++) {
-			if (!memcmp(s, region, reg_len)) { /* keyword found */
-				/* reset previous entries */
-				keypad_cfg_read(&env->gui, "reset");
-				break;
-			}
-		}
-		for ( ;s < e - 20; s++) {
-			char buf[256];
-			const unsigned char *s1;
-			if (index(" \t\r\n", *s))	/* ignore blanks */
-				continue;
-			if (*s > 127)	/* likely end of comment */
-				break;
-			if (memcmp(s, region, reg_len)) /* keyword not found */
-				break;
-			s += reg_len;
-			l = MIN(sizeof(buf), e - s);
-			ast_copy_string(buf, s, l);
-			s1 = ast_skip_blanks(buf);	/* between token and '=' */
-			if (*s1++ != '=')	/* missing separator */
-				break;
-			if (*s1 == '>')	/* skip => */
-				s1++;
-			keypad_cfg_read(&env->gui, ast_skip_blanks(s1));
-			/* now wait for a newline */
-			s1 = s;
-			while (s1 < e - 20 && !index("\r\n", *s1) && *s1 < 128)
-				s1++;
-			s = s1;
-		}
-	} while (0);
-	if (p)
-		munmap(p, l);
-	if (fd >= 0)
-		close(fd);
-}
-
-/* [re]set the main sdl window, useful in case of resize */
-static void sdl_setup(struct video_desc *env)
-{
-	int dpy_fmt = SDL_IYUV_OVERLAY;	/* YV12 causes flicker in SDL */
-	int depth, maxw, maxh;
-	const SDL_VideoInfo *info = SDL_GetVideoInfo();
-
-	/* We want at least 16bpp to support YUV overlays.
-	 * E.g with SDL_VIDEODRIVER = aalib the default is 8
-	 */
-	depth = info->vfmt->BitsPerPixel;
-	if (depth < 16)
-		depth = 16;
-	/*
-	 * initialize the SDL environment. We have one large window
-	 * with local and remote video, and a keypad.
-	 * At the moment we arrange them statically, as follows:
-	 * - on the left, the remote video;
-	 * - on the center, the keypad
-	 * - on the right, the local video
-	 */
-
-	keypad_setup(env);
-#define BORDER	5	/* border around our windows */
-	maxw = env->in.rem_dpy.w + env->out.loc_dpy.w + env->out.keypad_dpy.w;
-	maxh = MAX( MAX(env->in.rem_dpy.h, env->out.loc_dpy.h), env->out.keypad_dpy.h);
-	maxw += 4 * BORDER;
-	maxh += 2 * BORDER;
-	env->screen = SDL_SetVideoMode(maxw, maxh, depth, 0);
-	if (!env->screen) {
-		ast_log(LOG_ERROR, "SDL: could not set video mode - exiting\n");
-		goto no_sdl;
-	}
-
-	SDL_WM_SetCaption("Asterisk console Video Output", NULL);
-	if (set_win(env->screen, &env->win[WIN_REMOTE], dpy_fmt,
-			env->in.rem_dpy.w, env->in.rem_dpy.h, BORDER, BORDER))
-		goto no_sdl;
-	if (set_win(env->screen, &env->win[WIN_LOCAL], dpy_fmt,
-			env->out.loc_dpy.w, env->out.loc_dpy.h,
-			3*BORDER+env->in.rem_dpy.w + env->out.keypad_dpy.w, BORDER))
-		goto no_sdl;
-
-	/* display the skin, but do not free it as we need it later to
-	 * restore text areas and maybe sliders too.
-	 */
-	if (env->gui.keypad) {
-		struct SDL_Rect *dest = &env->win[WIN_KEYPAD].rect;
-		dest->x = 2*BORDER + env->in.rem_dpy.w;
-		dest->y = BORDER;
-		dest->w = env->gui.keypad->w;
-		dest->h = env->gui.keypad->h;
-		SDL_BlitSurface(env->gui.keypad, NULL, env->screen, dest);
-		SDL_UpdateRects(env->screen, 1, dest);
-	}
-	env->in.dec_in_cur = &env->in.dec_in[0];
-	env->in.dec_in_dpy = NULL;	/* nothing to display */
-	env->sdl_ok = 1;
-
-no_sdl:
-	if (env->sdl_ok == 0)	/* free resources in case of errors */
-		cleanup_sdl(env);
-}
-
 /*
  * Parse a geometry string, accepting also common names for the formats.
  * Trick: if we have a leading > or < and a numeric geometry,
@@ -3103,155 +1382,6 @@ static int video_geom(struct fbuf_t *b, const char *s)
 	return 0;
 }
 
-/*
- * Functions to determine if a point is within a region. Return 1 if success.
- * First rotate the point, with
- *	x' =  (x - x0) * cos A + (y - y0) * sin A
- *	y' = -(x - x0) * sin A + (y - y0) * cos A
- * where cos A = (x1-x0)/l, sin A = (y1 - y0)/l, and
- *	l = sqrt( (x1-x0)^2 + (y1-y0)^2
- * Then determine inclusion by simple comparisons i.e.:
- *	rectangle: x >= 0 && x < l && y >= 0 && y < h
- *	ellipse: (x-xc)^2/l^2 + (y-yc)^2/h2 < 1
- */
-static int kp_match_area(const struct keypad_entry *e, int x, int y)
-{
-	double xp, dx = (e->x1 - e->x0);
-	double yp, dy = (e->y1 - e->y0);
-	double l = sqrt(dx*dx + dy*dy);
-	int ret = 0;
-
-	if (l > 1) { /* large enough */
-		xp = ((x - e->x0)*dx + (y - e->y0)*dy)/l;
-		yp = (-(x - e->x0)*dy + (y - e->y0)*dx)/l;
-		if (e->type == KP_RECT) {
-			ret = (xp >= 0 && xp < l && yp >=0 && yp < l);
-		} else if (e->type == KP_CIRCLE) {
-			dx = xp*xp/(l*l) + yp*yp/(e->h*e->h);
-			ret = (dx < 1);
-		}
-	}
-#if 0
-	ast_log(LOG_WARNING, "result %d [%d] for match %d,%d in type %d p0 %d,%d p1 %d,%d h %d\n",
-		ret, e->c, x, y, e->type, e->x0, e->y0, e->x1, e->y1, e->h);
-#endif
-	return ret;
-}
-
-/*
- * read a keypad entry line in the format
- *	reset
- *	token circle xc yc diameter
- *	token circle xc yc x1 y1 h	# ellipse, main diameter and height
- *	token rect x0 y0 x1 y1 h	# rectangle with main side and eight
- * token is the token to be returned, either a character or a symbol
- * as KEY_* above
- */
-struct _s_k { const char *s; int k; };
-static struct _s_k gui_key_map[] = {
-	{"PICK_UP",	KEY_PICK_UP },
-	{"PICKUP",	KEY_PICK_UP },
-        {"HANG_UP",	KEY_HANG_UP },
-        {"HANGUP",	KEY_HANG_UP },
-        {"MUTE",	KEY_MUTE },
-        {"AUTOANSWER",	KEY_AUTOANSWER },
-        {"SENDVIDEO",	KEY_SENDVIDEO },
-        {"LOCALVIDEO",	KEY_LOCALVIDEO },
-        {"REMOTEVIDEO",	KEY_REMOTEVIDEO },
-        {"WRITEMESSAGE", KEY_WRITEMESSAGE },
-        {"GUI_CLOSE",	KEY_GUI_CLOSE },
-        {NULL, 0 } };
-
-static int keypad_cfg_read(struct gui_info *gui, const char *val)
-{
-	struct keypad_entry e;
-	char s1[16], s2[16];
-	int i, ret = 0;
-
-	bzero(&e, sizeof(e));
-	i = sscanf(val, "%14s %14s %d %d %d %d %d",
-                s1, s2, &e.x0, &e.y0, &e.x1, &e.y1, &e.h);
-
-	switch (i) {
-	default:
-		break;
-	case 1:	/* only "reset" is allowed */
-		if (strcasecmp(s1, "reset"))	/* invalid */
-			break;
-		if (gui->kp) {
-			gui->kp_used = 0;
-		}
-		break;
-	case 5: /* token circle xc yc diameter */
-		if (strcasecmp(s2, "circle"))	/* invalid */
-			break;
-		e.h = e.x1;
-		e.y1 = e.y0;	/* map radius in x1 y1 */
-		e.x1 = e.x0 + e.h;	/* map radius in x1 y1 */
-		e.x0 = e.x0 - e.h;	/* map radius in x1 y1 */
-		/* fallthrough */
-
-	case 7: /* token circle|rect x0 y0 x1 y1 h */
-		if (e.x1 < e.x0 || e.h <= 0) {
-			ast_log(LOG_WARNING, "error in coordinates\n");
-			e.type = 0;
-			break;
-		}
-		if (!strcasecmp(s2, "circle")) {
-			/* for a circle we specify the diameter but store center and radii */
-			e.type = KP_CIRCLE;
-			e.x0 = (e.x1 + e.x0) / 2;
-			e.y0 = (e.y1 + e.y0) / 2;
-			e.h = e.h / 2;
-		} else if (!strcasecmp(s2, "rect")) {
-			e.type = KP_RECT;
-		} else
-			break;
-		ret = 1;
-	}
-	// ast_log(LOG_WARNING, "reading [%s] returns %d %d\n", val, i, ret);
-	if (ret == 0)
-		return 0;
-	/* map the string into token to be returned */
-	i = atoi(s1);
-	if (i > 0 || s1[1] == '\0')	/* numbers or single characters */
-		e.c = (i > 9) ? i : s1[0];
-	else {
-		struct _s_k *p;
-		for (p = gui_key_map; p->s; p++) {
-			if (!strcasecmp(p->s, s1)) {
-				e.c = p->k;
-				break;
-			}
-		}
-	}
-	if (e.c == 0) {
-		ast_log(LOG_WARNING, "missing token\n");
-		return 0;
-	}
-	if (gui->kp_size == 0) {
-		gui->kp = ast_calloc(10, sizeof(e));
-		if (gui->kp == NULL) {
-			ast_log(LOG_WARNING, "cannot allocate kp");
-			return 0;
-		}
-		gui->kp_size = 10;
-	}
-	if (gui->kp_size == gui->kp_used) { /* must allocate */
-		struct keypad_entry *a = ast_realloc(gui->kp, sizeof(e)*(gui->kp_size+10));
-		if (a == NULL) {
-			ast_log(LOG_WARNING, "cannot reallocate kp");
-			return 0;
-		}
-		gui->kp = a;
-		gui->kp_size += 10;
-	}
-	if (gui->kp_size == gui->kp_used)
-		return 0;
-	gui->kp[gui->kp_used++] = e;
-	return 1;
-}
-
 /* extend ast_cli with video commands. Called by console_video_config */
 int console_video_cli(struct video_desc *env, const char *var, int fd)
 {
diff --git a/channels/vcodecs.c b/channels/vcodecs.c
new file mode 100644
index 0000000000000000000000000000000000000000..197726eb9886aff7c26204c74eeb1e55cdce605e
--- /dev/null
+++ b/channels/vcodecs.c
@@ -0,0 +1,1009 @@
+/*
+ * Video codecs support for console_video.c
+ * $Revision$
+ */
+
/*
 * Each codec is defined by a number of callbacks
 */
/*! \brief initialize the encoder */
typedef int (*encoder_init_f)(AVCodecContext *v);

/*! \brief actually call the encoder */
typedef int (*encoder_encode_f)(struct video_out_desc *v);

/*! \brief encapsulate the bitstream in RTP frames */
typedef struct ast_frame *(*encoder_encap_f)(struct video_out_desc *out,
		struct ast_frame **tail);

/*! \brief initialize the decoder */
typedef int (*decoder_init_f)(AVCodecContext *enc_ctx);

/*! \brief extract the bitstream from RTP frames and store in the fbuf.
 * return 0 if ok, 1 on error
 */
typedef int (*decoder_decap_f)(struct fbuf_t *b, uint8_t *data, int len);

/*! \brief actually call the decoder */
typedef int (*decoder_decode_f)(struct video_in_desc *v, struct fbuf_t *b);

/*! \brief descriptor of one supported video codec: the asterisk format
 * it handles plus the callbacks implementing the encode/decode pipeline. */
struct video_codec_desc {
	const char		*name;		/* format name */
	int			format;		/* AST_FORMAT_* */
	encoder_init_f		enc_init;	/* tune the codec context before encoding */
	encoder_encap_f		enc_encap;	/* split encoder output into RTP frames */
	encoder_encode_f	enc_run;	/* run the encoder on one picture */
	decoder_init_f		dec_init;	/* optional decoder setup (may be NULL) */
	decoder_decap_f		dec_decap;	/* collect payload from one RTP frame */
	decoder_decode_f	dec_run;	/* decode the accumulated bitstream */
};
+
+#ifdef debugging_only
+
/* some debugging code to check the bitstream:
 * declare a bit buffer, initialize it, and fetch data from it.
 */
struct bitbuf {
	const uint8_t *base;	/* underlying byte buffer, not owned */
	int	bitsize;	/* total size in bits */
	int	ofs;	/* next bit to read */
};

/*! \brief build a bit-reader over an existing byte buffer.
 * No copy is made, so 'base' must outlive the returned descriptor. */
static struct bitbuf bitbuf_init(const uint8_t *base, int bitsize, int start_ofs)
{
	struct bitbuf a;
	a.base = base;
	a.bitsize = bitsize;
	a.ofs = start_ofs;
	return a;
}
+
/*! \brief number of bits still available for reading */
static int bitbuf_left(struct bitbuf *b)
{
	return b->bitsize - b->ofs;
}
+
+static uint32_t getbits(struct bitbuf *b, int n)
+{
+	int i, ofs;
+	const uint8_t *d;
+	uint8_t mask;
+	uint32_t retval = 0;
+	if (n> 31) {
+		ast_log(LOG_WARNING, "too many bits %d, max 32\n", n);
+		return 0;
+	}
+	if (n + b->ofs > b->bitsize) {
+		ast_log(LOG_WARNING, "bitbuf overflow %d of %d\n", n + b->ofs, b->bitsize);
+		n = b->bitsize - b->ofs;
+	}
+	ofs = 7 - b->ofs % 8;	/* start from msb */
+	mask = 1 << ofs;
+	d = b->base + b->ofs / 8;	/* current byte */
+	for (i=0 ; i < n; i++) {
+		retval += retval + (*d & mask ? 1 : 0);	/* shift in new byte */
+		b->ofs++;
+		mask >>= 1;
+		if (mask == 0) {
+			d++;
+			mask = 0x80;
+		}
+	}
+	return retval;
+}
+
/*! \brief debug-only sanity check of an H.261 bitstream: verify the PSC,
 * walk the picture header and the GOB headers, and log anything that
 * looks malformed. The macroblock layer is not parsed.
 */
static void check_h261(struct fbuf_t *b)
{
	struct bitbuf a = bitbuf_init(b->data, b->used * 8, 0);
	uint32_t x, y;
	
	x = getbits(&a, 20);	/* PSC, 0000 0000 0000 0001 0000 */
	if (x != 0x10) {
		ast_log(LOG_WARNING, "bad PSC 0x%x\n", x);
		return;
	}
	x = getbits(&a, 5);	/* temporal reference */
	y = getbits(&a, 6);	/* ptype */
	if (0)	/* verbose picture-header dump, normally disabled */
	ast_log(LOG_WARNING, "size %d TR %d PTY spl %d doc %d freeze %d %sCIF hi %d\n",
		b->used,
		x,
		(y & 0x20) ? 1 : 0,
		(y & 0x10) ? 1 : 0,
		(y & 0x8) ? 1 : 0,
		(y & 0x4) ? "" : "Q",
		(y & 0x2) ? 1:0);
	/* PSPARE extension: each extra byte is announced by a '1' bit */
	while ( (x = getbits(&a, 1)) == 1)
		ast_log(LOG_WARNING, "PSPARE 0x%x\n", getbits(&a, 8));
	// ast_log(LOG_WARNING, "PSPARE 0 - start GOB LAYER\n");
	while ( (x = bitbuf_left(&a)) > 0) {
		// ast_log(LOG_WARNING, "GBSC %d bits left\n", x);
		x = getbits(&a, 16); /* GBSC 0000 0000 0000 0001 */
		if (x != 0x1) {
			ast_log(LOG_WARNING, "bad GBSC 0x%x\n", x);
			break;
		}
		x = getbits(&a, 4);	/* group number */
		y = getbits(&a, 5);	/* gquant */
		if (x == 0) {	/* GN 0 belongs to the picture layer, invalid here */
			ast_log(LOG_WARNING, "  bad GN %d\n", x);
			break;
		}
		/* GSPARE extension, same framing as PSPARE above */
		while ( (x = getbits(&a, 1)) == 1)
			ast_log(LOG_WARNING, "GSPARE 0x%x\n", getbits(&a, 8));
		while ( (x = bitbuf_left(&a)) > 0) { /* MB layer: not parsed yet */
			break;
		}
	}
}
+
/*! \brief hex-dump a frame buffer to the log, 16 bytes per line.
 * Only the first 32 bytes and the last two lines are printed; the
 * middle of a large buffer is skipped to keep the output short.
 */
void dump_buf(struct fbuf_t *b);
void dump_buf(struct fbuf_t *b)
{
	int i, x, last2lines;
	char buf[80];

	/* offset where the last two 16-byte lines begin */
	last2lines = (b->used - 16) & ~0xf;
	ast_log(LOG_WARNING, "buf size %d of %d\n", b->used, b->size);
	for (i = 0; i < b->used; i++) {
		x = i & 0xf;
		if ( x == 0) {	/* new line */
			if (i != 0)	/* flush the previous one first */
				ast_log(LOG_WARNING, "%s\n", buf);
			bzero(buf, sizeof(buf));
			sprintf(buf, "%04x: ", i);
		}
		sprintf(buf + 6 + x*3, "%02x ", b->data[i]);
		if (i > 31 && i < last2lines)	/* jump over the middle section */
			i = last2lines - 1;
	}
	if (buf[0])	/* flush the last, possibly partial, line */
		ast_log(LOG_WARNING, "%s\n", buf);
}
+#endif /* debugging_only */
+/*
+ * Here starts the glue code for the various supported video codecs.
+ * For each of them, we need to provide routines for initialization,
+ * calling the encoder, encapsulating the bitstream in ast_frames,
+ * extracting payload from ast_frames, and calling the decoder.
+ */
+
+/*--- h263+ support --- */
+
+/*! \brief initialization of h263p */
+static int h263p_enc_init(AVCodecContext *enc_ctx)
+{
+	/* modes supported are
+	- Unrestricted Motion Vector (annex D)
+	- Advanced Prediction (annex F)
+	- Advanced Intra Coding (annex I)
+	- Deblocking Filter (annex J)
+	- Slice Structure (annex K)
+	- Alternative Inter VLC (annex S)
+	- Modified Quantization (annex T)
+	*/
+	enc_ctx->flags |=CODEC_FLAG_H263P_UMV; /* annex D */
+	enc_ctx->flags |=CODEC_FLAG_AC_PRED; /* annex f ? */
+	enc_ctx->flags |=CODEC_FLAG_H263P_SLICE_STRUCT; /* annex k */
+	enc_ctx->flags |= CODEC_FLAG_H263P_AIC; /* annex I */
+
+	return 0;
+}
+
+
/*
 * Create RTP/H.263 fragments to avoid IP fragmentation. We fragment on a
 * PSC or a GBSC, but if we don't find a suitable place just break somewhere.
 * Everything is byte-aligned.
 * Returns the head of the frame list; the last frame is stored in *tail.
 */
static struct ast_frame *h263p_encap(struct video_out_desc *out,
	struct ast_frame **tail)
{
	struct ast_frame *cur = NULL, *first = NULL;
	uint8_t *d = out->enc_out.data;	/* scan pointer into the bitstream */
	int len = out->enc_out.used;	/* bytes still to packetize */
	int l = len; /* size of the current fragment. If 0, must look for a psc */

	for (;len > 0; len -= l, d += l) {
		uint8_t *data;
		struct ast_frame *f;
		int i, h;

		if (len >= 3 && d[0] == 0 && d[1] == 0 && d[2] >= 0x80) {
			/* we are starting a new block, so look for a PSC. */
			for (i = 3; i < len - 3; i++) {
				if (d[i] == 0 && d[i+1] == 0 && d[i+2] >= 0x80) {
					l = i;
					break;
				}
			}
		}
		/* NOTE(review): when no PSC is found above, 'l' keeps its
		 * value from the previous iteration; the two checks below
		 * clamp it back into range - confirm this is intended. */
		if (l > out->mtu || l > len) { /* psc not found, split */
			l = MIN(len, out->mtu);
		}
		if (l < 1 || l > out->mtu) {
			ast_log(LOG_WARNING, "--- frame error l %d\n", l);
			break;
		}
		
		if (d[0] == 0 && d[1] == 0) { /* we start with a psc */
			h = 0;	/* reuse the two zero bytes as the payload header */
		} else { /* no psc, create a header */
			h = 2;
		}

		f = create_video_frame(d, d+l, AST_FORMAT_H263_PLUS, h, cur);
		if (!f)
			break;

		data = f->data;
		if (h == 0) {	/* we start with a psc */
			data[0] |= 0x04;	// set P == 1, and we are done
		} else {	/* no psc, create a header */
			data[0] = data[1] = 0;	// P == 0
		}

		if (!cur)
			first = f;
		cur = f;
	}

	if (cur)
		cur->subclass |= 1; // RTP Marker

	*tail = cur;	/* end of the list */
	return first;
}
+
+/*! \brief extract the bitstreem from the RTP payload.
+ * This is format dependent.
+ * For h263+, the format is defined in RFC 2429
+ * and basically has a fixed 2-byte header as follows:
+ * 5 bits	RR	reserved, shall be 0
+ * 1 bit	P	indicate a start/end condition,
+ *			in which case the payload should be prepended
+ *			by two zero-valued bytes.
+ * 1 bit	V	there is an additional VRC header after this header
+ * 6 bits	PLEN	length in bytes of extra picture header
+ * 3 bits	PEBIT	how many bits to be ignored in the last byte
+ *
+ * XXX the code below is not complete.
+ */
+static int h263p_decap(struct fbuf_t *b, uint8_t *data, int len)
+{
+	int PLEN;
+
+	if (len < 2) {
+		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
+		return 1;
+	}
+	PLEN = ( (data[0] & 1) << 5 ) | ( (data[1] & 0xf8) >> 3);
+
+	if (PLEN > 0) {
+		data += PLEN;
+		len -= PLEN;
+	}
+	if (data[0] & 4)	/* bit P */
+		data[0] = data[1] = 0;
+	else {
+		data += 2;
+		len -= 2;
+	}
+	return fbuf_append(b, data, len, 0, 0);	/* ignore trail bits */
+}
+
+
+/*
+ * generic encoder, used by the various protocols supported here.
+ * We assume that the buffer is empty at the beginning.
+ */
+static int ffmpeg_encode(struct video_out_desc *v)
+{
+	struct fbuf_t *b = &v->enc_out;
+	int i;
+
+	b->used = avcodec_encode_video(v->enc_ctx, b->data, b->size, v->enc_in_frame);
+	i = avcodec_encode_video(v->enc_ctx, b->data + b->used, b->size - b->used, NULL); /* delayed frames ? */
+	if (i > 0) {
+		ast_log(LOG_WARNING, "have %d more bytes\n", i);
+		b->used += i;
+	}
+	return 0;
+}
+
/*
 * Generic decoder, which is used by h263p, h263 and h261 as it simply
 * invokes ffmpeg's decoder.
 * av_parser_parse should merge a randomly chopped up stream into
 * proper frames. After that, if we have a valid frame, we decode it
 * until the entire frame is processed.
 * Returns nonzero once a complete frame has been decoded.
 */
static int ffmpeg_decode(struct video_in_desc *v, struct fbuf_t *b)
{
	uint8_t *src = b->data;
	int srclen = b->used;
	int full_frame = 0;

	if (srclen == 0)	/* no data */
		return 0;
	while (srclen) {
		uint8_t *data;
		int datalen, ret;
		int len = av_parser_parse(v->parser, v->dec_ctx, &data, &datalen, src, srclen, 0, 0);

		src += len;
		srclen -= len;
		/* The parser might return something it cannot decode, so it skips
		 * the block returning no data
		 */
		if (data == NULL || datalen == 0)
			continue;
		ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame, data, datalen);
		if (full_frame == 1)	/* full frame */
			break;
		if (ret < 0) {
			ast_log(LOG_NOTICE, "Error decoding\n");
			break;
		}
	}
	/* compact the buffer: keep only the bytes the parser did not consume */
	if (srclen != 0)	/* update b with leftover data */
		bcopy(src, b->data, srclen);
	b->used = srclen;
	b->ebit = 0;
	return full_frame;
}
+
/*! \brief codec descriptor for H.263+: generic ffmpeg encode/decode
 * with the RFC 2429 packetization implemented above. */
static struct video_codec_desc h263p_codec = {
	.name = "h263p",
	.format = AST_FORMAT_H263_PLUS,
	.enc_init = h263p_enc_init,
	.enc_encap = h263p_encap,
	.enc_run = ffmpeg_encode,
	.dec_init = NULL,
	.dec_decap = h263p_decap,
	.dec_run = ffmpeg_decode
};
+
+/*--- Plain h263 support --------*/
+
+static int h263_enc_init(AVCodecContext *enc_ctx)
+{
+	/* XXX check whether these are supported */
+	enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
+	enc_ctx->flags |= CODEC_FLAG_H263P_AIC;
+	enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
+	enc_ctx->flags |= CODEC_FLAG_AC_PRED;
+
+	return 0;
+}
+
/*
 * h263 encapsulation is specified in RFC2190. There are three modes
 * defined (A, B, C), with 4, 8 and 12 bytes of header, respectively.
 * The header is made as follows
 *     0.....................|.......................|.............|....31
 *	F:1 P:1 SBIT:3 EBIT:3 SRC:3 I:1 U:1 S:1 A:1 R:4 DBQ:2 TRB:3 TR:8
 * FP = 0- mode A, (only one word of header)
 * FP = 10 mode B, and also means this is an I or P frame
 * FP = 11 mode C, and also means this is a PB frame.
 * SBIT, EBIT number of bits to ignore at beginning (msbits) and end (lsbits)
 * SRC  bits 6,7,8 from the h263 PTYPE field
 * I = 0 intra-coded, 1 = inter-coded (bit 9 from PTYPE)
 * U = 1 for Unrestricted Motion Vector (bit 10 from PTYPE)
 * S = 1 for Syntax Based Arith coding (bit 11 from PTYPE)
 * A = 1 for Advanced Prediction (bit 12 from PTYPE)
 * R = reserved, must be 0
 * DBQ = differential quantization, DBQUANT from h263, 0 unless we are using
 *	PB frames
 * TRB = temporal reference for bframes, also 0 unless this is a PB frame
 * TR = temporal reference for P frames, also 0 unless PB frame.
 *
 * Mode B and mode C description omitted.
 *
 * An RTP frame can start with a PSC 0000 0000 0000 0000 1000 0
 * or with a GBSC, which also has the first 17 bits as a PSC.
 * Note - PSC are byte-aligned, GOB not necessarily. PSC start with
 *	PSC:22 0000 0000 0000 0000 1000 00 	picture start code
 *	TR:8   .... ....			temporal reference
 *      PTYPE:13 or more 			ptype...
 * If we don't fragment a GOB SBIT and EBIT = 0.
 * reference, 8 bit) 
 * 
 * The assumption below is that we start with a PSC.
 */
static struct ast_frame *h263_encap(struct video_out_desc *out,
		struct ast_frame **tail)
{
	uint8_t *d = out->enc_out.data;
	int start = 0, i, len = out->enc_out.used;
	struct ast_frame *f, *cur = NULL, *first = NULL;
	const int pheader_len = 4;	/* Use RFC-2190 Mode A */
	uint8_t h263_hdr[12];	/* worst case, room for a type c header */
	uint8_t *h = h263_hdr;	/* shorthand */

#define H263_MIN_LEN	6
	if (len < H263_MIN_LEN)	/* unreasonably small */
		return NULL;

	bzero(h263_hdr, sizeof(h263_hdr));
	/* Now set the header bytes. Only type A by now,
	 * and h[0] = h[2] = h[3] = 0 by default.
	 * PTYPE starts 30 bits in the picture, so the first useful
	 * bit for us is bit 36 i.e. within d[4] (0 is the msbit).
	 * SRC = d[4] & 0x1c goes into data[1] & 0xe0
	 * I   = d[4] & 0x02 goes into data[1] & 0x10
	 * U   = d[4] & 0x01 goes into data[1] & 0x08
	 * S   = d[5] & 0x80 goes into data[1] & 0x04
	 * A   = d[5] & 0x40 goes into data[1] & 0x02
	 * R   = 0           goes into data[1] & 0x01
	 * Optimizing it, we have
	 */
	h[1] = ( (d[4] & 0x1f) << 3 ) |	/* SRC, I, U */
		( (d[5] & 0xc0) >> 5 );		/* S, A, R */

	/* now look for the next PSC or GOB header. First try to hit
	 * a '0' byte then look around for the 0000 0000 0000 0000 1 pattern
	 * which is both in the PSC and the GBSC.
	 */
	for (i = H263_MIN_LEN, start = 0; start < len; start = i, i += 3) {
		//ast_log(LOG_WARNING, "search at %d of %d/%d\n", i, start, len);
		for (; i < len ; i++) {
			uint8_t x, rpos, lpos;
			int rpos_i;	/* index corresponding to rpos */
			if (d[i] != 0)		/* cannot be in a GBSC */
				continue;
			if (i > len - 1)
				break;
			/* NOTE(review): when i == len - 1 the read below touches
			 * d[len]; presumably the caller oversizes the buffer as
			 * h261_encap documents - confirm. */
			x = d[i+1];
			if (x == 0)	/* next is equally good */
				continue;
			/* see if around us we can make 16 '0' bits for the GBSC.
			 * Look for the first bit set on the right, and then
			 * see if we have enough 0 on the left.
			 * We are guaranteed to end before rpos == 0
			 */
			for (rpos = 0x80, rpos_i = 8; rpos; rpos >>= 1, rpos_i--)
				if (x & rpos)	/* found the '1' bit in GBSC */
					break;
			x = d[i-1];		/* now look behind */
			for (lpos = rpos; lpos ; lpos >>= 1)
				if (x & lpos)	/* too early, not a GBSC */
					break;
			if (lpos)		/* as i said... */
				continue;
			/* now we have a GBSC starting somewhere in d[i-1],
			 * but it might be not byte-aligned
			 */
			if (rpos == 0x80) {	/* lucky case */
				i = i - 1;
			} else {	/* XXX to be completed */
				ast_log(LOG_WARNING, "unaligned GBSC 0x%x %d\n",
					rpos, rpos_i);
			}
			break;
		}
		/* This frame is up to offset i (not inclusive).
		 * We do not split it yet even if larger than MTU.
		 */
		f = create_video_frame(d + start, d+i, AST_FORMAT_H263,
				pheader_len, cur);

		if (!f)
			break;
		bcopy(h, f->data, 4);	/* copy the h263 header */
		/* XXX to do: if not aligned, fix sbit and ebit,
		 * then move i back by 1 for the next frame
		 */
		if (!cur)
			first = f;
		cur = f;
	}

	if (cur)
		cur->subclass |= 1;	// RTP Marker

	*tail = cur;
	return first;
}
+
+/* XXX We only drop the header here, but maybe we need more. */
+static int h263_decap(struct fbuf_t *b, uint8_t *data, int len)
+{
+	if (len < 4) {
+		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
+		return 1;	/* error */
+	}
+
+	if ( (data[0] & 0x80) == 0) {
+		len -= 4;
+		data += 4;
+	} else {
+		ast_log(LOG_WARNING, "unsupported mode 0x%x\n",
+			data[0]);
+		return 1;
+	}
+	return fbuf_append(b, data, len, 0, 0);	/* XXX no bit alignment support yet */
+}
+
/*! \brief codec descriptor for plain H.263: generic ffmpeg encode/decode
 * with the RFC 2190 Mode A packetization implemented above. */
static struct video_codec_desc h263_codec = {
	.name = "h263",
	.format = AST_FORMAT_H263,
	.enc_init = h263_enc_init,
	.enc_encap = h263_encap,
	.enc_run = ffmpeg_encode,
	.dec_init = NULL,
	.dec_decap = h263_decap,
	.dec_run = ffmpeg_decode
						
};
+
/*---- h261 support -----*/
/*! \brief initialization of the h261 encoder context */
static int h261_enc_init(AVCodecContext *enc_ctx)
{
	/* It is important to set rtp_payload_size = 0, otherwise
	 * ffmpeg in h261 mode will produce output that it cannot parse.
	 * Also try to send I frames more frequently than with other codecs.
	 */
	enc_ctx->rtp_payload_size = 0; /* important - ffmpeg fails otherwise */

	return 0;
}
+
/*
 * The encapsulation of H261 is defined in RFC4587 which obsoletes RFC2032
 * The bitstream is preceded by a 32-bit header word:
 *  SBIT:3 EBIT:3 I:1 V:1 GOBN:4 MBAP:5 QUANT:5 HMVD:5 VMVD:5
 * SBIT and EBIT are the bits to be ignored at beginning and end,
 * I=1 if the stream has only INTRA frames - cannot change during the stream.
 * V=0 if motion vector is not used. Cannot change.
 * GOBN is the GOB number in effect at the start of packet, 0 if we
 *	start with a GOB header
 * QUANT is the quantizer in effect, 0 if we start with GOB header
 * HMVD  reference horizontal motion vector. 10000 is forbidden
 * VMVD  reference vertical motion vector, as above.
 * Packetization should occur at GOB boundaries, and if not possible
 * with MacroBlock fragmentation. However it is likely that blocks
 * are not bit-aligned so we must take care of this.
 */
static struct ast_frame *h261_encap(struct video_out_desc *out,
		struct ast_frame **tail)
{
	uint8_t *d = out->enc_out.data;
	int start = 0, i, len = out->enc_out.used;
	struct ast_frame *f, *cur = NULL, *first = NULL;
	const int pheader_len = 4;
	uint8_t h261_hdr[4];
	uint8_t *h = h261_hdr;	/* shorthand */
	int sbit = 0, ebit = 0;	/* bit alignment carried across fragments */

#define H261_MIN_LEN 10
	if (len < H261_MIN_LEN)	/* unreasonably small */
		return NULL;

	bzero(h261_hdr, sizeof(h261_hdr));

	/* Similar to the code in h263_encap, but the marker there is longer.
	 * Start a few bytes within the bitstream to avoid hitting the marker
	 * twice. Note we might access the buffer at len, but this is ok because
	 * the caller has it oversized.
	 */
	for (i = H261_MIN_LEN, start = 0; start < len - 1; start = i, i += 4) {
#if 0	/* test - disable packetization */
		i = len;	/* wrong... */
#else
		int found = 0, found_ebit = 0;	/* last GBSC position found */
		for (; i < len ; i++) {
			uint8_t x, rpos, lpos;
			if (d[i] != 0)		/* cannot be in a GBSC */
				continue;
			x = d[i+1];
			if (x == 0)	/* next is equally good */
				continue;
			/* See if around us we find 15 '0' bits for the GBSC.
			 * Look for the first bit set on the right, and then
			 * see if we have enough 0 on the left.
			 * We are guaranteed to end before rpos == 0
			 */
			for (rpos = 0x80, ebit = 7; rpos; ebit--, rpos >>= 1)
				if (x & rpos)	/* found the '1' bit in GBSC */
					break;
			x = d[i-1];		/* now look behind */
			for (lpos = (rpos >> 1); lpos ; lpos >>= 1)
				if (x & lpos)	/* too early, not a GBSC */
					break;
			if (lpos)		/* as i said... */
				continue;
			/* now we have a GBSC starting somewhere in d[i-1],
			 * but it might be not byte-aligned. Just remember it.
			 */
			if (i - start > out->mtu) /* too large, stop now */
				break;
			found_ebit = ebit;
			found = i;
			i += 4;	/* continue forward */
		}
		if (i >= len) {	/* trim if we went too forward */
			i = len;
			ebit = 0;	/* hopefully... should ask the bitstream ? */
		}
		if (i - start > out->mtu && found) {
			/* use the previous GBSC, hope is within the mtu */
			i = found;
			ebit = found_ebit;
		}
#endif /* test */
		if (i - start < 4)	/* XXX too short ? */
			continue;
		/* This frame is up to offset i (not inclusive).
		 * We do not split it yet even if larger than MTU.
		 */
		f = create_video_frame(d + start, d+i, AST_FORMAT_H261,
				pheader_len, cur);

		if (!f)
			break;
		/* recompute header with I=0, V=1 */
		h[0] = ( (sbit & 7) << 5 ) | ( (ebit & 7) << 2 ) | 1;
		bcopy(h, f->data, 4);	/* copy the h261 header */
		if (ebit)	/* not aligned, restart from previous byte */
			i--;
		sbit = (8 - ebit) & 7;	/* next fragment starts mid-byte */
		ebit = 0;
		if (!cur)
			first = f;
		cur = f;
	}
	if (cur)
		cur->subclass |= 1;	// RTP Marker

	*tail = cur;
	return first;
}
+
+/*
+ * Pieces might be unaligned so we really need to put them together.
+ */
+static int h261_decap(struct fbuf_t *b, uint8_t *data, int len)
+{
+	int ebit, sbit;
+
+	if (len < 8) {
+		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
+		return 1;
+	}
+	sbit = (data[0] >> 5) & 7;
+	ebit = (data[0] >> 2) & 7;
+	len -= 4;
+	data += 4;
+	return fbuf_append(b, data, len, sbit, ebit);
+}
+
/*! \brief codec descriptor for H.261: generic ffmpeg encode/decode
 * with the RFC 4587 packetization implemented above. */
static struct video_codec_desc h261_codec = {
	.name = "h261",
	.format = AST_FORMAT_H261,
	.enc_init = h261_enc_init,
	.enc_encap = h261_encap,
	.enc_run = ffmpeg_encode,
	.dec_init = NULL,
	.dec_decap = h261_decap,
	.dec_run = ffmpeg_decode
};
+
/* mpeg4 support */
/*! \brief initialization of the mpeg4 encoder context.
 * The extra coding-mode flags are currently disabled (#if 0) and kept
 * only for reference. */
static int mpeg4_enc_init(AVCodecContext *enc_ctx)
{
#if 0
	//enc_ctx->flags |= CODEC_FLAG_LOW_DELAY; /*don't use b frames ?*/
	enc_ctx->flags |= CODEC_FLAG_AC_PRED;
	enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
	enc_ctx->flags |= CODEC_FLAG_QPEL;
	enc_ctx->flags |= CODEC_FLAG_4MV;
	enc_ctx->flags |= CODEC_FLAG_GMC;
	enc_ctx->flags |= CODEC_FLAG_LOOP_FILTER;
	enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
#endif
	enc_ctx->rtp_payload_size = 0; /* important - ffmpeg fails otherwise */
	return 0;
}
+
+/* simplistic encapsulation - just split frames in mtu-size units */
+static struct ast_frame *mpeg4_encap(struct  video_out_desc *out,
+	struct ast_frame **tail)
+{
+	struct ast_frame *f, *cur = NULL, *first = NULL;
+	uint8_t *d = out->enc_out.data;
+	uint8_t *end = d+out->enc_out.used;
+	int len;
+
+	for (;d < end; d += len, cur = f) {
+		len = MIN(out->mtu, end-d);
+		f = create_video_frame(d, d+len, AST_FORMAT_MP4_VIDEO, 0, cur);
+		if (!f)
+			break;
+		if (!first)
+			first = f;
+	}
+	if (cur)
+		cur->subclass |= 1;
+	*tail = cur;
+	return first;
+}
+
/*! \brief mpeg4 payload is the raw bitstream with no extra header:
 * just append it to the decoding buffer. */
static int mpeg4_decap(struct fbuf_t *b, uint8_t *data, int len)
{
	return fbuf_append(b, data, len, 0, 0);
}
+
/*! \brief decode the buffered mpeg4 bitstream, keeping any unconsumed
 * bytes at the front of the buffer for the next call.
 * Returns nonzero when a complete frame was produced. */
static int mpeg4_decode(struct video_in_desc *v, struct fbuf_t *b)
{
	int full_frame = 0, datalen = b->used;
	int ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame,
		b->data, datalen);
	if (ret < 0) {	/* decode error: drop the whole buffer */
		ast_log(LOG_NOTICE, "Error decoding\n");
		ret = datalen; /* assume we used everything. */
	}
	datalen -= ret;
	if (datalen > 0)	/* update b with leftover bytes */
		bcopy(b->data + ret, b->data, datalen);
	b->used = datalen;
	b->ebit = 0;
	return full_frame;
}
+
/*! \brief codec descriptor for MPEG4 video: simple mtu-sized
 * packetization and a dedicated decode routine. */
static struct video_codec_desc mpeg4_codec = {
	.name = "mpeg4",
	.format = AST_FORMAT_MP4_VIDEO,
	.enc_init = mpeg4_enc_init,
	.enc_encap = mpeg4_encap,
	.enc_run = ffmpeg_encode,
	.dec_init = NULL,
	.dec_decap = mpeg4_decap,
	.dec_run = mpeg4_decode
};
+
/*! \brief initialization of the h264 encoder context */
static int h264_enc_init(AVCodecContext *enc_ctx)
{
	enc_ctx->flags |= CODEC_FLAG_TRUNCATED;
	//enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
	//enc_ctx->flags2 |= CODEC_FLAG2_FASTPSKIP;
	/* TODO: Maybe we need to add some other flags */
	enc_ctx->rtp_mode = 0;
	enc_ctx->rtp_payload_size = 0;
	/* allow the rate to swing up to the full bitrate */
	enc_ctx->bit_rate_tolerance = enc_ctx->bit_rate;
	return 0;
}
+
/*! \brief initialization of the h264 decoder context: accept a
 * bitstream that may be cut at arbitrary positions. */
static int h264_dec_init(AVCodecContext *dec_ctx)
{
	dec_ctx->flags |= CODEC_FLAG_TRUNCATED;

	return 0;
}
+
/*
 * The structure of a generic H.264 stream is:
 * - 0..n 0-byte(s), unused, optional. one zero-byte is always present
 *   in the first NAL before the start code prefix.
 * - start code prefix (3 bytes): 0x000001
 *   (so the very first NAL of the bytestream effectively carries a
 *   4-byte start code, 0x00000001)
 * - NAL header byte ( F[1] | NRI[2] | Type[5] ) where type != 0
 * - byte-stream
 * - 0..n 0-byte(s) (padding, unused).
 * Segmentation in RTP only needs to be done on start code prefixes.
 * If fragments are too long... we don't support it yet.
 * - encapsulate (or fragment) the byte-stream (with NAL header included)
 */
static struct ast_frame *h264_encap(struct video_out_desc *out,
	struct ast_frame **tail)
{
	struct ast_frame *f = NULL, *cur = NULL, *first = NULL;
	uint8_t *d, *start = out->enc_out.data;
	uint8_t *end = start + out->enc_out.used;

	/* Search the first start code prefix - ITU-T H.264 sec. B.2,
	 * and move start right after that, on the NAL header byte.
	 */
#define HAVE_NAL(x) (x[-4] == 0 && x[-3] == 0 && x[-2] == 0 && x[-1] == 1)
	for (start += 4; start < end; start++) {
		int ty = start[0] & 0x1f;	/* NAL unit type */
		if (HAVE_NAL(start) && ty != 0 && ty != 31)
			break;
	}
	/* if not found, or too short, we just skip the next loop and are done. */

	/* Here follows the main loop to create frames. Search subsequent start
	 * codes, and then possibly fragment the unit into smaller fragments.
	 */
   for (;start < end - 4; start = d) {
	int size;		/* size of current block */
	uint8_t hdr[2];		/* add-on header when fragmenting */
	int ty = 0;

	/* now search next nal */
	for (d = start + 4; d < end; d++) {
		ty = d[0] & 0x1f;
		if (HAVE_NAL(d))
			break;	/* found NAL */
	}
	/* have a block to send. d past the start code unless we overflow */
	if (d >= end) {	/* NAL not found */
		d = end + 4;	/* pretend a start code right past the end */
	} else if (ty == 0 || ty == 31) { /* found but invalid type, skip */
		ast_log(LOG_WARNING, "skip invalid nal type %d at %d of %d\n",
			ty, d - out->enc_out.data, out->enc_out.used);
		continue;
	}

	size = d - start - 4;	/* don't count the end */

	if (size < out->mtu) {	// test - don't fragment
		// Single NAL Unit
		f = create_video_frame(start, d - 4, AST_FORMAT_H264, 0, cur);
		if (!f)
			break;
		if (!first)
			first = f;

		cur = f;
		continue;
	}

	// Fragmented Unit (Mode A: no DON, very weak)
	hdr[0] = (*start & 0xe0) | 28;	/* mark as a fragmentation unit */
	hdr[1] = (*start++ & 0x1f) | 0x80 ;	/* keep type and set START bit */
	size--;		/* skip the NAL header */
	while (size) {
		uint8_t *data;
		int frag_size = MIN(size, out->mtu);

		f = create_video_frame(start, start+frag_size, AST_FORMAT_H264, 2, cur);
		if (!f)
			break;
		size -= frag_size;	/* skip this data block */
		start += frag_size;

		data = f->data;
		data[0] = hdr[0];
		data[1] = hdr[1] | (size == 0 ? 0x40 : 0);	/* end bit if we are done */
		hdr[1] &= ~0x80;	/* clear start bit for subsequent frames */
		if (!first)
			first = f;
		cur = f;
	}
    }

	if (cur)
		cur->subclass |= 1;     // RTP Marker

	*tail = cur;

	return first;
}
+
/*! \brief extract the H.264 bitstream from an RTP payload.
 * Single NAL units (types 1-23) and FU-A fragments (type 28) are
 * rebuilt into Annex B byte-stream form; aggregation packets and
 * FU-B are rejected. Returns 0 on success, nonzero on error.
 */
static int h264_decap(struct fbuf_t *b, uint8_t *data, int len)
{
	/* Start Code Prefix (Annex B in specification) */
	uint8_t scp[] = { 0x00, 0x00, 0x00, 0x01 };
	int retval = 0;
	int type, ofs = 0;

	if (len < 2) {
		ast_log(LOG_WARNING, "--- invalid len %d\n", len);
		return 1;
	}
	/* first of all, check if the packet has F == 0 */
	if (data[0] & 0x80) {
		ast_log(LOG_WARNING, "--- forbidden packet; nal: %02x\n",
			data[0]);
		return 1;
	}

	type = data[0] & 0x1f;
	switch (type) {
	case 0:
	case 31:
		ast_log(LOG_WARNING, "--- invalid type: %d\n", type);
		return 1;
	case 24:	/* aggregation packets and FU-B: not handled */
	case 25:
	case 26:
	case 27:
	case 29:
		ast_log(LOG_WARNING, "--- encapsulation not supported : %d\n", type);
		return 1;
	case 28:	/* FU-A Unit */
		if (data[1] & 0x80) { // S == 1, import F and NRI from next
			data[1] &= 0x1f;	/* preserve type */
			data[1] |= (data[0] & 0xe0);	/* import F & NRI */
			retval = fbuf_append(b, scp, sizeof(scp), 0, 0);
			ofs = 1;	/* keep the rebuilt NAL header byte */
		} else {
			ofs = 2;	/* middle/end fragment: drop both header bytes */
		}
		break;
	default:	/* From 1 to 23 (Single NAL Unit) */
		retval = fbuf_append(b, scp, sizeof(scp), 0, 0);
	}
	if (!retval)
		retval = fbuf_append(b, data + ofs, len - ofs, 0, 0);
	if (retval)
		ast_log(LOG_WARNING, "result %d\n", retval);
	return retval;
}
+
/*! \brief codec descriptor for H.264: generic ffmpeg encode/decode
 * with NAL-based packetization and FU-A fragmentation. */
static struct video_codec_desc h264_codec = {
	.name = "h264",
	.format = AST_FORMAT_H264,
	.enc_init = h264_enc_init,
	.enc_encap = h264_encap,
	.enc_run = ffmpeg_encode,
	.dec_init = h264_dec_init,
	.dec_decap = h264_decap,
	.dec_run = ffmpeg_decode
};
+
/*
 * Table of translation between asterisk and ffmpeg formats.
 * We need also a field for read and write (encoding and decoding), because
 * e.g. H263+ uses different codec IDs in ffmpeg when encoding or decoding.
 */
struct _cm {    /* map ffmpeg codec types to asterisk formats */
	uint32_t	ast_format;	/* 0 is a terminator */
	enum CodecID	codec;		/* corresponding ffmpeg codec id */
	enum { CM_RD = 1, CM_WR = 2, CM_RDWR = 3 } rw;  /* read or write or both ? */
	struct video_codec_desc *codec_desc;	/* NOTE(review): never initialized below */
};

/* translation table, terminated by a zero ast_format */
static struct _cm video_formats[] = {
        { AST_FORMAT_H263_PLUS, CODEC_ID_H263,  CM_RD }, /* incoming H263P ? */
        { AST_FORMAT_H263_PLUS, CODEC_ID_H263P, CM_WR },
        { AST_FORMAT_H263,      CODEC_ID_H263,  CM_RD },
        { AST_FORMAT_H263,      CODEC_ID_H263,  CM_WR },
        { AST_FORMAT_H261,      CODEC_ID_H261,  CM_RDWR },
        { AST_FORMAT_H264,      CODEC_ID_H264,  CM_RDWR },
        { AST_FORMAT_MP4_VIDEO, CODEC_ID_MPEG4, CM_RDWR },
        { 0,                    0, 0 },
};
+                
+
+/*! \brief map an asterisk format into an ffmpeg one */
+static enum CodecID map_video_format(uint32_t ast_format, int rw)
+{
+	struct _cm *i;
+
+	for (i = video_formats; i->ast_format != 0; i++)
+		if (ast_format & i->ast_format && rw & i->rw && rw & i->rw)
+			return i->codec;
+	return CODEC_ID_NONE;
+}
+
/* pointers to supported codecs. We assume the first one to be non null. */
static struct video_codec_desc *supported_codecs[] = {
	&h263p_codec,
	&h264_codec,
	&h263_codec,
	&h261_codec,
	&mpeg4_codec,
	NULL	/* terminator */
};
+
+/*
+ * Map the AST_FORMAT to the library. If not recognised, fail.
+ * This is useful in the input path where we get frames.
+ */
+static struct video_codec_desc *map_video_codec(int fmt)
+{
+	int i;
+
+	for (i = 0; supported_codecs[i]; i++)
+		if (fmt == supported_codecs[i]->format) {
+			ast_log(LOG_WARNING, "using %s for format 0x%x\n",
+				supported_codecs[i]->name, fmt);
+			return supported_codecs[i];
+		}
+	return NULL;
+}
+
+/*------ end codec specific code -----*/