[prev in list] [next in list] [prev in thread] [next in thread] 

List:       fedora-directory-commits
Subject:    =?utf-8?q?=5B389-commits=5D?= [389-ds-base] branch 389-ds-base-1.3.9 updated: Ticket 49873 - Content
From:       pagure () pagure ! io
Date:       2019-03-29 13:28:41
Message-ID: 20190329132841.18460.34944 () pagure01 ! fedoraproject ! org
[Download RAW message or body]

This is an automated email from the git hooks/post-receive script.

tbordaz pushed a commit to branch 389-ds-base-1.3.9
in repository 389-ds-base.

The following commit(s) were added to refs/heads/389-ds-base-1.3.9 by this push:
     new f6c0c84  Ticket 49873 - Contention on virtual attribute lookup
f6c0c84 is described below

commit f6c0c84e24096ca7e789af8ff03e11e2edc73a7e
Author: Thierry Bordaz <tbordaz@redhat.com>
AuthorDate: Tue Jan 15 11:13:42 2019 +0100

    Ticket 49873 - Contention on virtual attribute lookup
    
    Bug Description:
    	During lookup of the virtual attribute table (filter evaluation and returned
    	attributes), the lock is acquired in read mode many times: for example, it is
    	acquired for each targetfilter aci and for each evaluated entry.
    	Unfortunately RW lock is expensive and appears frequently on pstacks.
    	The lock exists because the table can be updated, but updates are very rare
    	(addition of a new service provider). So general processing is slowed down
    	for the sake of exceptional events.
    Fix Description:
    	The fix is to acquire/release the read lock at the operation level and set a
    	per-thread flag, so that later lookups during the operation just check the flag.
    
    	SSL initialization does internal searches that access the vattr_global_lock
    	Call of vattr_global_lock_create needs to be called before \
slapd_do_all_nss_ssl_init.  
    	Also, 'main' may or may not fork; the initialization of the thread-private
    	variable is done either in the child or in the parent, depending on whether main forks.
    
    	The leak is fixed by using a destructor callback on the private variable, and
    	by calling PR_SetThreadPrivate only if no private variable exists yet.
    
    	This patch is the merge of the four 49873 patches done in master
    
    https://pagure.io/389-ds-base/issue/49873
    
    Reviewed by: Ludwig Krispenz, William Brown , Simon Pichugi (thanks !!)
    
    Platforms tested: F28
    
    Flag Day: no
    
    Doc impact: no
---
 ldap/servers/slapd/detach.c     |   9 +++
 ldap/servers/slapd/opshared.c   |   6 ++
 ldap/servers/slapd/proto-slap.h |   5 ++
 ldap/servers/slapd/vattr.c      | 164 +++++++++++++++++++++++++++++++++++-----
 4 files changed, 167 insertions(+), 17 deletions(-)

diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c
index 681e6a7..d5c95a0 100644
--- a/ldap/servers/slapd/detach.c
+++ b/ldap/servers/slapd/detach.c
@@ -144,6 +144,10 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, \
daemon_ports_t *  }
             break;
         }
+        /* The thread private counter needs to be allocated after the fork
+         * it is not inherited from parent process
+         */
+        vattr_global_lock_create();
 
         /* call this right after the fork, but before closing stdin */
         if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, \
ports_info)) { @@ -174,6 +178,11 @@ detach(int slapd_exemode, int \
importexport_encrypt, int s_port, daemon_ports_t *  
         g_set_detached(1);
     } else { /* not detaching - call nss/ssl init */
+        /* The thread private counter needs to be allocated after the fork
+         * it is not inherited from parent process
+         */
+        vattr_global_lock_create();
+
         if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, \
ports_info)) {  return 1;
         }
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 50b7ae8..cf6cdff 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -244,6 +244,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
     int pr_idx = -1;
     Slapi_DN *orig_sdn = NULL;
     int free_sdn = 0;
+    PRBool vattr_lock_acquired = PR_FALSE;
 
     be_list[0] = NULL;
     referral_list[0] = NULL;
@@ -511,6 +512,8 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
     }
 
     slapi_pblock_set(pb, SLAPI_BACKEND_COUNT, &index);
+    vattr_rdlock();
+    vattr_lock_acquired = PR_TRUE;
 
     if (be) {
         slapi_pblock_set(pb, SLAPI_BACKEND, be);
@@ -969,6 +972,9 @@ free_and_return:
     } else if (be_single) {
         slapi_be_Unlock(be_single);
     }
+    if (vattr_lock_acquired) {
+        vattr_rd_unlock();
+    }
 
 free_and_return_nolock:
     slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 7a429b2..79017e6 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1409,6 +1409,11 @@ void subentry_create_filter(Slapi_Filter **filter);
  * vattr.c
  */
 void vattr_init(void);
+void vattr_global_lock_create(void);
+void vattr_rdlock();
+void vattr_rd_unlock();
+void vattr_wrlock();
+void vattr_wr_unlock();
 void vattr_cleanup(void);
 
 /*
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index f7c473a..852a887 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -102,6 +102,16 @@ int vattr_basic_sp_init();
 
 void **statechange_api;
 
+struct _vattr_map
+{
+    Slapi_RWLock *lock;
+    PLHashTable *hashtable; /* Hash table */
+};
+typedef struct _vattr_map vattr_map;
+
+static vattr_map *the_map = NULL;
+static PRUintn thread_private_global_vattr_lock;
+
 /* Housekeeping Functions, called by server startup/shutdown code */
 
 /* Called on server startup, init all structures etc */
@@ -115,7 +125,136 @@ vattr_init()
     vattr_basic_sp_init();
 #endif
 }
+/*
+ * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex
 + * It is called each time:
+ *  - PR_SetThreadPrivate is call with a not NULL private value
+ *  - on thread exit
+ */
+static void
+vattr_global_lock_free(void *ptr)
+{
+    int *nb_acquired = ptr;
+    if (nb_acquired) {
+        slapi_ch_free((void **)&nb_acquired);
+    }
+}
+/* Create a private variable for each individual thread of the current process */
+void
+vattr_global_lock_create()
+{
+    if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, \
vattr_global_lock_free) != PR_SUCCESS) { +        slapi_log_err(SLAPI_LOG_ALERT,
+              "vattr_global_lock_create", "Failure to create global lock for virtual \
attribute !\n"); +        PR_ASSERT(0);
+    }
+}
+static int
+global_vattr_lock_get_acquired_count()
+{
+    int *nb_acquired;
+    nb_acquired = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
+    if (nb_acquired == NULL) {
+        /* if it was not initialized set it to zero */
+        nb_acquired = (int *) slapi_ch_calloc(1, sizeof(int));
+        PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquired);
+    }
+    return *nb_acquired;
+}
+static void
+global_vattr_lock_set_acquired_count(int nb_acquired)
+{
+    int *val;
+    val = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
+    if (val == NULL) {
+        /* if it was not initialized set it to zero */
+        val = (int *) slapi_ch_calloc(1, sizeof(int));
+        PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val);
+    }
+    *val = nb_acquired;
+}
+/* The map lock can be acquired recursively. So only the first rdlock
+ * will acquire the lock.
+ * A optimization acquires it at high level (op_shared_search), so that
+ * later calls during the operation processing will just increase/decrease a \
counter. + */
+void
+vattr_rdlock()
+{
+    int nb_acquire = global_vattr_lock_get_acquired_count();
+
+    if (nb_acquire == 0) {
+        /* The lock was not held just acquire it */
+        slapi_rwlock_rdlock(the_map->lock);
+    }
+    nb_acquire++;
+    global_vattr_lock_set_acquired_count(nb_acquire);
+
+}
+/* The map lock can be acquired recursively. So only the last unlock
+ * will release the lock.
+ * A optimization acquires it at high level (op_shared_search), so that
+ * later calls during the operation processing will just increase/decrease a \
counter. + */
+void
+vattr_rd_unlock()
+{
+    int nb_acquire = global_vattr_lock_get_acquired_count();
 
+    if (nb_acquire >= 1) {
+        nb_acquire--;
+        if (nb_acquire == 0) {
+            slapi_rwlock_unlock(the_map->lock);
+        }
+        global_vattr_lock_set_acquired_count(nb_acquire);
+    } else {
+        /* this is likely the consequence of lock acquire in read during an internal \
search +         * but the search callback updated the map and release the readlock \
and acquired +         * it in write.
+         * So after the update the lock was no longer held but when completing the \
internal +         * search we release the global read lock, that now has nothing to \
do +         */
+        slapi_log_err(SLAPI_LOG_DEBUG,
+          "vattr_rd_unlock", "vattr lock no longer acquired in read.\n");
+    }
+}
+
+/* The map lock is acquired in write (updating the map)
+ * It exists a possibility that lock is acquired in write while it is already
+ * hold in read by this thread (internal search with updating callback)
+ * In such situation, the we must abandon the read global lock and acquire in write
+ */
+void
+vattr_wrlock()
+{
+    int nb_read_acquire = global_vattr_lock_get_acquired_count();
+
+    if (nb_read_acquire) {
+        /* The lock was acquired in read but we need it in write
+         * release it and set the global vattr_lock counter to 0
+         */
+        slapi_rwlock_unlock(the_map->lock);
+        global_vattr_lock_set_acquired_count(0);
+    }
+    slapi_rwlock_wrlock(the_map->lock);
+}
+/* The map lock is release from a write write (updating the map)
+ */
+void
+vattr_wr_unlock()
+{
+    int nb_read_acquire = global_vattr_lock_get_acquired_count();
+
+    if (nb_read_acquire) {
+        /* The lock being acquired in write, the private thread counter
+         * (that count the number of time it was acquired in read) should be 0
+         */
+        slapi_log_err(SLAPI_LOG_INFO,
+          "vattr_unlock", "The lock was acquired in write. We should not be \
here\n"); +        PR_ASSERT(nb_read_acquire == 0);
+    }
+    slapi_rwlock_unlock(the_map->lock);
+}
 /* Called on server shutdown, free all structures, inform service providers that \
we're going down etc */  void
 vattr_cleanup()
@@ -1811,15 +1950,6 @@ typedef struct _vattr_map_entry vattr_map_entry;
 
 vattr_map_entry test_entry = {NULL};
 
-struct _vattr_map
-{
-    Slapi_RWLock *lock;
-    PLHashTable *hashtable; /* Hash table */
-};
-typedef struct _vattr_map vattr_map;
-
-static vattr_map *the_map = NULL;
-
 static PRIntn
 vattr_hash_compare_keys(const void *v1, const void *v2)
 {
@@ -1939,11 +2069,11 @@ vattr_map_lookup(const char *type_to_find, vattr_map_entry \
**result)  }
 
     /* Get the reader lock */
-    slapi_rwlock_rdlock(the_map->lock);
+    vattr_rdlock();
     *result = (vattr_map_entry *)PL_HashTableLookupConst(the_map->hashtable,
                                                          (void *)basetype);
     /* Release ze lock */
-    slapi_rwlock_unlock(the_map->lock);
+    vattr_rd_unlock();
 
     if (tmp) {
         slapi_ch_free_string(&tmp);
@@ -1962,13 +2092,13 @@ vattr_map_insert(vattr_map_entry *vae)
 {
     PR_ASSERT(the_map);
     /* Get the writer lock */
-    slapi_rwlock_wrlock(the_map->lock);
+    vattr_wrlock();
     /* Insert the thing */
     /* It's illegal to call this function if the entry is already there */
     PR_ASSERT(NULL == PL_HashTableLookupConst(the_map->hashtable, (void \
                *)vae->type_name));
     PL_HashTableAdd(the_map->hashtable, (void *)vae->type_name, (void *)vae);
     /* Unlock and we're done */
-    slapi_rwlock_unlock(the_map->lock);
+    vattr_wr_unlock();
     return 0;
 }
 
@@ -2105,13 +2235,13 @@ schema_changed_callback(Slapi_Entry *e \
__attribute__((unused)),  void *caller_data __attribute__((unused)))
 {
     /* Get the writer lock */
-    slapi_rwlock_wrlock(the_map->lock);
+    vattr_wrlock();
 
     /* go through the list */
     PL_HashTableEnumerateEntries(the_map->hashtable, vattr_map_entry_rebuild_schema, \
0);  
     /* Unlock and we're done */
-    slapi_rwlock_unlock(the_map->lock);
+    vattr_wr_unlock();
 }
 
 
@@ -2131,7 +2261,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
                 objAttrValue *obj;
 
                 if (0 == vattr_map_lookup(type, &map_entry)) {
-                    slapi_rwlock_rdlock(the_map->lock);
+                    vattr_rdlock();
 
                     obj = map_entry->objectclasses;
 
@@ -2148,7 +2278,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
                         obj = obj->pNext;
                     }
 
-                    slapi_rwlock_unlock(the_map->lock);
+                    vattr_rd_unlock();
                 }
 
                 slapi_valueset_free(vs);

-- 
To stop receiving notification emails like this one, please contact
the administrator of this repository.
_______________________________________________
389-commits mailing list -- 389-commits@lists.fedoraproject.org
To unsubscribe send an email to 389-commits-leave@lists.fedoraproject.org
Fedora Code of Conduct: https://getfedora.org/code-of-conduct.html
List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines
List Archives: https://lists.fedoraproject.org/archives/list/389-commits@lists.fedoraproject.org



[prev in list] [next in list] [prev in thread] [next in thread] 

Configure | About | News | Add a list | Sponsored by KoreLogic