[prev in list] [next in list] [prev in thread] [next in thread] 

List:       linux-security-module
Subject:    Re: [PATCH v17 7/4] LSM: Fix theoretical race window.
From:       Tetsuo Handa <penguin-kernel () I-love ! SAKURA ! ne ! jp>
Date:       2014-10-30 10:32:53
Message-ID: 201410301932.JFG09067.OtFOJOFLSFMVHQ () I-love ! SAKURA ! ne ! jp
[Download RAW message or body]

From 3f74801d776b4e4bde402236f7d47e70486920a5 Mon Sep 17 00:00:00 2001
From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Date: Thu, 30 Oct 2014 15:49:51 +0900
Subject: [PATCH] LSM: Fix theoretical race window.

The writer side (i.e. add_hook_to_list()) does

  INIT_LIST_HEAD(&new->shl_head);
  new->shl_ops = sop;
  shp = list_last_entry(hooklist, struct security_hook_list, shl_head);
  list_add_rcu(&new->shl_head, &shp->shl_head);

in order to ensure that the assignment to new->shl_ops completes before
new->shl_head becomes reachable from the hooklist.

On architectures where the CPU might see an outdated P->shl_ops value
when dereferencing an up-to-date P (e.g. the Alpha architecture),

  do {
    struct security_hook_list *P;
    list_for_each_entry(P, &hooks_##FUNC, shl_head)
      P->shl_ops->FUNC(__VA_ARGS__);
  } while (0)

P->shl_ops could still be NULL at P->shl_ops->FUNC().

Therefore, we need to use list_for_each_entry_rcu() which involves
rcu_dereference() rather than plain list_for_each_entry().
(Please see the patch at the tail of this post.)

Note that this race window is theoretical until we allow LKM-based
LSM modules because add_hook_to_list() is currently called before
SMP is enabled.

Note that this race window can be closed even after we allow LKM-based
LSM modules by statically initializing new->shl_ops at the module side.
That is, something like doing

  static struct security_hook_list kpr_hook_list[max_lsm_hooks];
  static struct security_operations kpr_ops = {
    .name        = "kpr",
    .hook_list   = kpr_hook_list,
    .socket_bind = kpr_socket_bind,
  };
  static struct security_hook_list kpr_hook_list[max_lsm_hooks] = {
    [lsm_socket_bind].shl_ops = &kpr_ops,
  };

(where max_lsm_hooks and lsm_socket_bind are defined at
https://marc.info/?l=linux-security-module&m=141007297512310&w=2 )
and calling

  register_security(&kpr_ops)

at the module side and doing

  list_add_tail_rcu(&ops->hook_list[lsm_socket_bind].shl_head, hooklist);

where hooklist == &hooks_socket_bind (or
hooklist == &lsm_hooks[lsm_socket_bind] if we use the array of lists
approach) at add_hook_to_list() side.

Then, add_hook_to_list() no longer fails because no memory allocation
will be done inside register_security(). Then, register_security() will
no longer fail unless there are conflicting hooks.

Well, what about drastically changing "struct security_operations" like

  struct lsm_ptrace_access_check {
    struct list_head list;
    int (*func) (struct task_struct *child, unsigned int mode);
  };
  struct lsm_ptrace_traceme {
    struct list_head list;
    int (*func) (struct task_struct *parent);
  };
  struct lsm_capget {
    struct list_head list;
    int (*func) (struct task_struct *target, kernel_cap_t *effective,
                 kernel_cap_t *inheritable, kernel_cap_t *permitted);
  };
  (...snipped...)
  struct security_operations {
    char name[SECURITY_NAME_MAX + 1];
    struct lsm_ptrace_access_check ptrace_access_check;
    struct lsm_ptrace_traceme ptrace_traceme;
    struct lsm_capget capget;
    (...snipped...)
  };

and calling like

  do {
    struct lsm_##FUNC *P;
    list_for_each_entry(P, &hooks_##FUNC, list)
      P->func(__VA_ARGS__);
  } while (0)

? This might be simple and efficient.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
 security/security.c |   22 +++++++++-------------
 1 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/security/security.c b/security/security.c
index 4e338b9..e280c31 100644
--- a/security/security.c
+++ b/security/security.c
@@ -156,7 +156,7 @@ int __init register_security(struct security_operations *ops)
 	do {								\
 		struct security_hook_list *P;				\
 									\
-		list_for_each_entry(P, &hooks_##FUNC, shl_head)		\
+		list_for_each_entry_rcu(P, &hooks_##FUNC, shl_head)	\
 			P->shl_ops->FUNC(__VA_ARGS__);			\
 	} while (0)							\
 
@@ -165,7 +165,7 @@ int __init register_security(struct security_operations *ops)
 	do {								\
 		struct security_hook_list *P;				\
 									\
-		list_for_each_entry(P, &hooks_##FUNC, shl_head) {	\
+		list_for_each_entry_rcu(P, &hooks_##FUNC, shl_head) {	\
 			RC = P->shl_ops->FUNC(__VA_ARGS__);		\
 			if (RC != 0)					\
 				break;					\
@@ -179,7 +179,7 @@ int __init register_security(struct security_operations *ops)
 	do {								\
 		struct security_hook_list *P;				\
 									\
-		list_for_each_entry(P, &hooks_##FUNC, shl_head) {	\
+		list_for_each_entry_rcu(P, &hooks_##FUNC, shl_head) {	\
 			RC = P->shl_ops->FUNC(__VA_ARGS__);		\
 			if (RC != 0)					\
 				break;					\
@@ -276,7 +276,7 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
 	 * agree that it should be set it will. If any module
 	 * thinks it should not be set it won't.
 	 */
-	list_for_each_entry(shp, &hooks_vm_enough_memory, shl_head) {
+	list_for_each_entry_rcu(shp, &hooks_vm_enough_memory, shl_head) {
 		rc = shp->shl_ops->vm_enough_memory(mm, pages);
 		if (rc <= 0) {
 			cap_sys_admin = 0;
@@ -1142,7 +1142,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 	int rc = -ENOSYS;
 	struct security_hook_list *shp;
 
-	list_for_each_entry(shp, &hooks_task_prctl, shl_head) {
+	list_for_each_entry_rcu(shp, &hooks_task_prctl, shl_head) {
 		thisrc = shp->shl_ops->task_prctl(option, arg2, arg3,
 							arg4, arg5);
 		if (thisrc != -ENOSYS) {
@@ -1697,7 +1697,8 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
 	 * For speed optimization, we explicitly break the loop rather than
 	 * using call_int_hook2().
 	 */
-	list_for_each_entry(shp, &hooks_xfrm_state_pol_flow_match, shl_head) {
+	list_for_each_entry_rcu(shp, &hooks_xfrm_state_pol_flow_match,
+				shl_head) {
 		rc = shp->shl_ops->xfrm_state_pol_flow_match(x, xp, fl);
 		break;
 	}
@@ -1790,7 +1791,6 @@ static int __init add_hook_to_list(struct list_head *hooklist,
 					struct security_operations *sop)
 {
 	struct security_hook_list *new;
-	struct security_hook_list *shp;
 
 	if (hooklist->next == NULL)
 		INIT_LIST_HEAD(hooklist);
@@ -1802,12 +1802,8 @@ static int __init add_hook_to_list(struct list_head *hooklist,
 	if (new == NULL)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&new->shl_head);
 	new->shl_ops = sop;
-
-	shp = list_last_entry(hooklist, struct security_hook_list, shl_head);
-	list_add_rcu(&new->shl_head, &shp->shl_head);
-
+	list_add_tail_rcu(&new->shl_head, hooklist);
 	return 0;
 }
 
@@ -2301,7 +2297,7 @@ static void clear_hook_entry(struct list_head *head,
 {
 	struct security_hook_list *shp;
 
-	list_for_each_entry(shp, head, shl_head)
+	list_for_each_entry_rcu(shp, head, shl_head)
 		if (shp->shl_ops == sop) {
 			list_del_rcu(&shp->shl_head);
 			return;
-- 
1.7.1
--
To unsubscribe from this list: send the line "unsubscribe linux-security-module" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
[prev in list] [next in list] [prev in thread] [next in thread] 

Configure | About | News | Add a list | Sponsored by KoreLogic