xen/arch/x86/mm/mm-locks.h | 46 ++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 46 insertions(+), 0 deletions(-)
Declare an order-enforcing construct for a lock used in the mm layer
that is not of type mm_lock_t. This is useful whenever the mm layer
takes locks from other subsystems, or locks not implemented as
mm_lock_t.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
diff -r 75f1e156386d -r 81eedccb3a85 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -70,6 +70,27 @@ static inline void _mm_lock(mm_lock_t *l
panic("mm lock already held by %s\n", l->locker_function);
__set_lock_level(level);
}
+
+/* Pre-acquire hook for an external (non-mm_lock_t) lock: asserts that
+ * taking a lock of this ordering level is legal at the current level. */
+static inline void _mm_enforce_order_lock_pre(int level)
+{
+    __check_lock_level(level);
+}
+
+/* Post-acquire hook: save the current lock level in *unlock_level so it
+ * can be restored on release, then raise the level to 'level'.  For
+ * recursive locks, only the outermost acquisition (count 0 -> 1) saves
+ * the level; nested acquisitions merely bump the count. */
+static inline void _mm_enforce_order_lock_post(int level, int *unlock_level,
+                unsigned short *recurse_count)
+{
+    if ( recurse_count )
+    {
+        /* Parenthesized: increment the count, not the pointer. */
+        if ( (*recurse_count)++ == 0 )
+        {
+            *unlock_level = __get_lock_level();
+        }
+    } else {
+        *unlock_level = __get_lock_level();
+    }
+    __set_lock_level(level);
+}
+
/* This wrapper uses the line number to express the locking order below */
#define declare_mm_lock(name) \
static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
@@ -78,6 +99,16 @@ static inline void _mm_lock(mm_lock_t *l
#define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
#define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
+/* This wrapper is intended for "external" locks which do not use
+ * the mm_lock_t types. Such locks inside the mm code are also subject
+ * to ordering constraints.  As with declare_mm_lock, the declaration's
+ * line number fixes the lock's position in the ordering. */
+#define declare_mm_order_constraint(name)                                  \
+    static inline void mm_enforce_order_lock_pre_##name(void)              \
+    { _mm_enforce_order_lock_pre(__LINE__); }                              \
+    static inline void mm_enforce_order_lock_post_##name(                  \
+        int *unlock_level, unsigned short *recurse_count)                  \
+    { _mm_enforce_order_lock_post(__LINE__, unlock_level, recurse_count); }
+
static inline void mm_unlock(mm_lock_t *l)
{
if ( l->lock.recurse_cnt == 1 )
@@ -88,6 +119,21 @@ static inline void mm_unlock(mm_lock_t *
spin_unlock_recursive(&l->lock);
}
+/* Release hook for an external lock: restore the lock level saved by
+ * _mm_enforce_order_lock_post().  For recursive locks, only the
+ * outermost release (count 1 -> 0) restores the level. */
+static inline void mm_enforce_order_unlock(int unlock_level,
+                unsigned short *recurse_count)
+{
+    if ( recurse_count )
+    {
+        BUG_ON(*recurse_count == 0);
+        /* Parenthesized: decrement the count, not the pointer. */
+        if ( (*recurse_count)-- == 1 )
+        {
+            __set_lock_level(unlock_level);
+        }
+    } else {
+        __set_lock_level(unlock_level);
+    }
+}
+
/************************************************************************
* *
* To avoid deadlocks, these locks _MUST_ be taken in the order they're *
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|