rdp: Add cross-thread work queues

FreeRDP has some features that start new threads and run
callback functions in them.

We need a way to punt work from these threads back to the
compositor thread.
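
A sketch of the intended usage, with invented names (rdp_audio_task,
apply_volume and handle_volume_from_channel are illustrative only,
and the code assumes rdp.h from this commit plus <stdlib.h>): a
callback running on a FreeRDP thread embeds struct rdp_loop_task in
its own task structure and queues it with
rdp_dispatch_task_to_display_loop(); the task function later runs on
the compositor thread, where it does the work and frees the task.

    struct rdp_audio_task {
        struct rdp_loop_task base; /* must be first, so the void *data
                                    * argument casts back to this type */
        int volume;
    };

    static void
    rdp_audio_task_func(bool freeOnly, void *data)
    {
        struct rdp_audio_task *audioTask = data;

        /* runs on the compositor thread; freeOnly is true only when
         * the peer context is destroyed before the task is dispatched */
        if (!freeOnly)
            /* apply_volume() is a hypothetical compositor-side helper */
            apply_volume(audioTask->base.peerCtx, audioTask->volume);
        free(audioTask); /* the task function owns the allocation */
    }

    /* runs on a FreeRDP virtual channel thread */
    static void
    handle_volume_from_channel(RdpPeerContext *peerCtx, int volume)
    {
        struct rdp_audio_task *audioTask = malloc(sizeof *audioTask);

        if (!audioTask)
            return;
        audioTask->volume = volume;
        rdp_dispatch_task_to_display_loop(peerCtx, rdp_audio_task_func,
                                          &audioTask->base);
    }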

Co-authored-by: Steve Pronovost <spronovo@microsoft.com>
Co-authored-by: Brenton DeGeer <brdegeer@microsoft.com>
Signed-off-by: Hideyuki Nagase <hideyukn@microsoft.com>
Signed-off-by: Steve Pronovost <spronovo@microsoft.com>
Signed-off-by: Brenton DeGeer <brdegeer@microsoft.com>
commit 3bdc29b934 (parent e884e7c7b8)
Author: Hideyuki Nagase
Date: 2022-04-29 07:22:41 -05:00
Committed by: Derek Foreman

5 changed files with 223 additions and 0 deletions

libweston/backend-rdp/meson.build

@@ -4,6 +4,10 @@ endif
config_h.set('BUILD_RDP_COMPOSITOR', '1')
if get_option('rdp-thread-check')
config_h.set('ENABLE_RDP_THREAD_CHECK', '1')
endif
dep_frdp = dependency('freerdp2', version: '>= 2.2.0', required: false)
if not dep_frdp.found()
error('RDP-backend requires freerdp >= 2.2.0 which was not found. Or, you can use \'-Dbackend-rdp=false\'.')

libweston/backend-rdp/rdp.c

@@ -31,6 +31,7 @@
#include <string.h>
#include <errno.h>
#include <linux/input.h>
#include <unistd.h>
#include "rdp.h"
@@ -621,6 +622,10 @@ rdp_peer_context_new(freerdp_peer* client, RdpPeerContext* context)
context->item.peer = client;
context->item.flags = RDP_PEER_OUTPUT_ENABLED;
context->loop_task_event_source_fd = -1;
context->loop_task_event_source = NULL;
wl_list_init(&context->loop_task_list);
context->rfx_context = rfx_context_new(TRUE);
if (!context->rfx_context)
return FALSE;
@@ -661,6 +666,8 @@ rdp_peer_context_free(freerdp_peer* client, RdpPeerContext* context)
wl_event_source_remove(context->events[i]);
}
rdp_destroy_dispatch_task_event_source(context);
if (context->item.flags & RDP_PEER_ACTIVATED) {
weston_seat_release_keyboard(context->item.seat);
weston_seat_release_pointer(context->item.seat);
@@ -1459,6 +1466,10 @@ rdp_peer_init(freerdp_peer *client, struct rdp_backend *b)
peerCtx->events[i] = 0;
wl_list_insert(&b->output->peers, &peerCtx->item.link);
if (!rdp_initialize_dispatch_task_event_source(peerCtx))
goto error_initialize;
return 0;
error_initialize:
@@ -1498,6 +1509,7 @@ rdp_backend_create(struct weston_compositor *compositor,
if (b == NULL)
return NULL;
b->compositor_tid = gettid();
b->compositor = compositor;
b->base.destroy = rdp_destroy;
b->base.create_output = rdp_output_create;

libweston/backend-rdp/rdp.h

@@ -86,6 +86,7 @@ struct rdp_backend {
bool remotefx_codec;
int external_listener_fd;
int rdp_monitor_refresh_rate;
pid_t compositor_tid;
};
enum peer_item_flags {
@@ -132,9 +133,32 @@ struct rdp_peer_context {
int horizontalAccumWheelRotationPrecise;
int horizontalAccumWheelRotationDiscrete;
/* cross-thread work queue: tasks sent from FreeRDP threads to the display loop. */
int loop_task_event_source_fd;
struct wl_event_source *loop_task_event_source;
pthread_mutex_t loop_task_list_mutex;
struct wl_list loop_task_list; /* struct rdp_loop_task::link */
};
typedef struct rdp_peer_context RdpPeerContext;
typedef void (*rdp_loop_task_func_t)(bool freeOnly, void *data);
struct rdp_loop_task {
struct wl_list link;
RdpPeerContext *peerCtx;
rdp_loop_task_func_t func;
};
#ifdef ENABLE_RDP_THREAD_CHECK
#define ASSERT_COMPOSITOR_THREAD(b) assert_compositor_thread(b)
#define ASSERT_NOT_COMPOSITOR_THREAD(b) assert_not_compositor_thread(b)
#else
#define ASSERT_COMPOSITOR_THREAD(b) (void)b
#define ASSERT_NOT_COMPOSITOR_THREAD(b) (void)b
#endif /* ENABLE_RDP_THREAD_CHECK */
#define rdp_debug_verbose(b, ...) \
rdp_debug_print(b->verbose, false, __VA_ARGS__)
#define rdp_debug_verbose_continue(b, ...) \
@@ -151,6 +175,32 @@ rdp_debug_print(struct weston_log_scope *log_scope, bool cont, char *fmt, ...);
void
convert_rdp_keyboard_to_xkb_rule_names(UINT32 KeyboardType, UINT32 KeyboardSubType, UINT32 KeyboardLayout, struct xkb_rule_names *xkbRuleNames);
#ifdef ENABLE_RDP_THREAD_CHECK
void
assert_compositor_thread(struct rdp_backend *b);
void
assert_not_compositor_thread(struct rdp_backend *b);
#endif /* ENABLE_RDP_THREAD_CHECK */
bool
rdp_event_loop_add_fd(struct wl_event_loop *loop,
int fd, uint32_t mask,
wl_event_loop_fd_func_t func,
void *data,
struct wl_event_source **event_source);
void
rdp_dispatch_task_to_display_loop(RdpPeerContext *peerCtx,
rdp_loop_task_func_t func,
struct rdp_loop_task *task);
bool
rdp_initialize_dispatch_task_event_source(RdpPeerContext *peerCtx);
void
rdp_destroy_dispatch_task_event_source(RdpPeerContext *peerCtx);
static inline struct rdp_head *
to_rdp_head(struct weston_head *base)
{

libweston/backend-rdp/rdputil.c

@@ -75,3 +75,154 @@ void rdp_debug_print(struct weston_log_scope *log_scope, bool cont, char *fmt, ...)
end:
va_end(ap);
}
#ifdef ENABLE_RDP_THREAD_CHECK
void
assert_compositor_thread(struct rdp_backend *b)
{
assert(b->compositor_tid == gettid());
}
void
assert_not_compositor_thread(struct rdp_backend *b)
{
assert(b->compositor_tid != gettid());
}
#endif /* ENABLE_RDP_THREAD_CHECK */
bool
rdp_event_loop_add_fd(struct wl_event_loop *loop,
int fd, uint32_t mask,
wl_event_loop_fd_func_t func,
void *data, struct wl_event_source **event_source)
{
*event_source = wl_event_loop_add_fd(loop, fd, 0, func, data);
if (!*event_source) {
weston_log("%s: wl_event_loop_add_fd failed.\n", __func__);
return false;
}
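/* the fd was registered with an empty mask above; arm the caller's requested events now */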
wl_event_source_fd_update(*event_source, mask);
return true;
}
void
rdp_dispatch_task_to_display_loop(RdpPeerContext *peerCtx,
rdp_loop_task_func_t func,
struct rdp_loop_task *task)
{
/* This function is ONLY used to queue a task from a FreeRDP thread;
 * the task is then processed on the Wayland display loop thread. */
ASSERT_NOT_COMPOSITOR_THREAD(peerCtx->rdpBackend);
task->peerCtx = peerCtx;
task->func = func;
pthread_mutex_lock(&peerCtx->loop_task_list_mutex);
/* this inserts at head */
wl_list_insert(&peerCtx->loop_task_list, &task->link);
pthread_mutex_unlock(&peerCtx->loop_task_list_mutex);
eventfd_write(peerCtx->loop_task_event_source_fd, 1);
}
static int
rdp_dispatch_task(int fd, uint32_t mask, void *arg)
{
RdpPeerContext *peerCtx = (RdpPeerContext *)arg;
struct rdp_loop_task *task, *tmp;
eventfd_t dummy;
/* this callback must be invoked on the Wayland display loop thread */
ASSERT_COMPOSITOR_THREAD(peerCtx->rdpBackend);
eventfd_read(peerCtx->loop_task_event_source_fd, &dummy);
pthread_mutex_lock(&peerCtx->loop_task_list_mutex);
/* dequeue the oldest task: new tasks are inserted at the head, so the oldest sits at the tail; hence the reverse walk. */
assert(!wl_list_empty(&peerCtx->loop_task_list));
wl_list_for_each_reverse_safe(task, tmp, &peerCtx->loop_task_list, link) {
wl_list_remove(&task->link);
break;
}
pthread_mutex_unlock(&peerCtx->loop_task_list_mutex);
/* Dispatch; the task function is responsible for freeing the task. */
task->func(false, task);
return 0;
}
bool
rdp_initialize_dispatch_task_event_source(RdpPeerContext *peerCtx)
{
struct rdp_backend *b = peerCtx->rdpBackend;
struct wl_event_loop *loop;
bool ret;
/* note: pthread_mutex_init() returns an error number on failure, not -1, and does not set errno */
if (pthread_mutex_init(&peerCtx->loop_task_list_mutex, NULL) != 0) {
weston_log("%s: pthread_mutex_init failed.\n", __func__);
goto error_mutex;
}
assert(peerCtx->loop_task_event_source_fd == -1);
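/* semaphore-mode eventfd: each queued task writes 1 to the counter, each dispatch callback reads 1 back */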
peerCtx->loop_task_event_source_fd = eventfd(0, EFD_SEMAPHORE | EFD_CLOEXEC);
if (peerCtx->loop_task_event_source_fd == -1) {
weston_log("%s: eventfd(EFD_SEMAPHORE) failed. %s\n", __func__, strerror(errno));
goto error_event_source_fd;
}
assert(wl_list_empty(&peerCtx->loop_task_list));
loop = wl_display_get_event_loop(b->compositor->wl_display);
assert(peerCtx->loop_task_event_source == NULL);
ret = rdp_event_loop_add_fd(loop,
peerCtx->loop_task_event_source_fd,
WL_EVENT_READABLE, rdp_dispatch_task,
peerCtx,
&peerCtx->loop_task_event_source);
if (!ret)
goto error_event_loop_add_fd;
return true;
error_event_loop_add_fd:
close(peerCtx->loop_task_event_source_fd);
peerCtx->loop_task_event_source_fd = -1;
error_event_source_fd:
pthread_mutex_destroy(&peerCtx->loop_task_list_mutex);
error_mutex:
return false;
}
void
rdp_destroy_dispatch_task_event_source(RdpPeerContext *peerCtx)
{
struct rdp_loop_task *task, *tmp;
/* This function must be called after all FreeRDP virtual channel threads
 * have terminated, which guarantees no more incoming tasks. */
if (peerCtx->loop_task_event_source) {
wl_event_source_remove(peerCtx->loop_task_event_source);
peerCtx->loop_task_event_source = NULL;
}
wl_list_for_each_reverse_safe(task, tmp, &peerCtx->loop_task_list, link) {
wl_list_remove(&task->link);
/* the task was never dispatched before context destruction;
 * tell the callback to only clean up and free its resources. */
task->func(true /* freeOnly */, task);
}
assert(wl_list_empty(&peerCtx->loop_task_list));
if (peerCtx->loop_task_event_source_fd != -1) {
close(peerCtx->loop_task_event_source_fd);
peerCtx->loop_task_event_source_fd = -1;
}
pthread_mutex_destroy(&peerCtx->loop_task_list_mutex);
}
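
Note the lockstep between the semaphore-mode eventfd and the dispatch
callback: every rdp_dispatch_task_to_display_loop() call adds one to the
eventfd counter, and every rdp_dispatch_task() wakeup reads one back and
pops exactly one task from the tail of the list, so queued tasks are
handled in FIFO order, one display loop iteration per task.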

meson_options.txt

@@ -26,6 +26,12 @@ option(
value: true,
description: 'Weston backend: RDP remote screensharing'
)
option(
'rdp-thread-check',
type: 'boolean',
value: false,
description: 'Aggressive thread sanity checks for the RDP backend'
)
option(
'screenshare',
type: 'boolean',