author      Tomasz Stanislawski <t.stanislaws@samsung.com>    2013-06-11 14:55:13 +0200
committer   Casey Schaufler <casey@schaufler-ca.com>           2013-08-02 01:55:20 +0200
commit      4d7cf4a1f49f76f4069114ee08be75cd68c37c5a (patch)
tree        f57088bc8a93d2c1345eb8e50822f74dffff6350 /security/smack/smack.h
parent      security: smack: fix memleak in smk_write_rules_list() (diff)
security: smack: add a hash table to quicken smk_find_entry()
Accepted for the smack-next tree after changing the number of slots from 128 to 16. This patch adds a hash table to quicken searching of a smack label by its name. Basically, the patch improves performance of SMACK initialization. Parsing of rules involves translation from a string to a smack_known (aka label) entity which is done in smk_find_entry(). The current implementation of the function iterates over a global list of smack_known resulting in O(N) complexity for smk_find_entry(). The total complexity of SMACK initialization becomes O(rules * labels). Therefore it scales quadratically with a complexity of a system. Applying the patch reduced the complexity of smk_find_entry() to O(1) as long as number of label is in hundreds. If the number of labels is increased please update SMACK_HASH_SLOTS constant defined in security/smack/smack.h. Introducing the configuration of this constant with Kconfig or cmdline might be a good idea. The size of the hash table was adjusted experimentally. The rule set used by TIZEN contains circa 17K rules for 500 labels. The table above contains results of SMACK initialization using 'time smackctl apply' bash command. The 'Ref' is a kernel without this patch applied. The consecutive values refers to value of SMACK_HASH_SLOTS. Every measurement was repeated three times to reduce noise. | Ref | 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128 | 256 | 512 -------------------------------------------------------------------------------------------- Run1 | 1.156 | 1.096 | 0.883 | 0.764 | 0.692 | 0.667 | 0.649 | 0.633 | 0.634 | 0.629 | 0.620 Run2 | 1.156 | 1.111 | 0.885 | 0.764 | 0.694 | 0.661 | 0.649 | 0.651 | 0.634 | 0.638 | 0.623 Run3 | 1.160 | 1.107 | 0.886 | 0.764 | 0.694 | 0.671 | 0.661 | 0.638 | 0.631 | 0.624 | 0.638 AVG | 1.157 | 1.105 | 0.885 | 0.764 | 0.693 | 0.666 | 0.653 | 0.641 | 0.633 | 0.630 | 0.627 Surprisingly, a single hlist is slightly faster than a double-linked list. The speed-up saturates near 64 slots. Therefore I chose value 128 to provide some margin if more labels were used. It looks that IO becomes a new bottleneck. Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Diffstat (limited to 'security/smack/smack.h')
-rw-r--r--    security/smack/smack.h    5
1 file changed, 5 insertions, 0 deletions
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 339614c76e63..e80597a3048a 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -53,6 +53,7 @@
*/
struct smack_known {
struct list_head list;
+ struct hlist_node smk_hashed;
char *smk_known;
u32 smk_secid;
struct netlbl_lsm_secattr smk_netlabel; /* on wire labels */
@@ -222,6 +223,7 @@ char *smk_parse_smack(const char *string, int len);
int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
+void smk_insert_entry(struct smack_known *skp);
struct smack_known *smk_find_entry(const char *);
u32 smack_to_secid(const char *);
@@ -247,6 +249,9 @@ extern struct list_head smk_netlbladdr_list;
extern struct security_operations smack_ops;
+#define SMACK_HASH_SLOTS 16
+extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
/*
* Is the directory transmuting?
*/
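The array declared above and the insert path would be defined in a .c file; the following is a hedged sketch only (smack_known_list refers to Smack's existing global list of labels, and the hashing detail mirrors the lookup sketch earlier rather than anything shown in this diff):

/*
 * Sketch: possible definition and insertion path, e.g. in smack_access.c.
 * A zero-initialized global array of hlist_head is a valid set of empty
 * buckets.
 */
struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];

void smk_insert_entry(struct smack_known *skp)
{
	unsigned int hash;
	struct hlist_head *head;

	/* Same bucket selection as the lookup side (assumed hash). */
	hash = full_name_hash(skp->smk_known, strlen(skp->smk_known));
	head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];

	/* Link the label into its hash bucket and into the global list. */
	hlist_add_head_rcu(&skp->smk_hashed, head);
	list_add_rcu(&skp->list, &smack_known_list);
}

Note that SMACK_HASH_SLOTS must stay a power of two for the (SMACK_HASH_SLOTS - 1) mask to select a bucket correctly; that holds both for the 16 slots used in this hunk and for the 128 discussed in the commit message.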