Diffstat (limited to 'drivers/md/dm-vdo/funnel-queue.h')
-rw-r--r--  drivers/md/dm-vdo/funnel-queue.h | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm-vdo/funnel-queue.h b/drivers/md/dm-vdo/funnel-queue.h
index 88a30c593fdc..bde0f1deff98 100644
--- a/drivers/md/dm-vdo/funnel-queue.h
+++ b/drivers/md/dm-vdo/funnel-queue.h
@@ -3,8 +3,8 @@
* Copyright 2023 Red Hat
*/
-#ifndef UDS_FUNNEL_QUEUE_H
-#define UDS_FUNNEL_QUEUE_H
+#ifndef VDO_FUNNEL_QUEUE_H
+#define VDO_FUNNEL_QUEUE_H
#include <linux/atomic.h>
#include <linux/cache.h>
@@ -25,19 +25,19 @@
* the queue entries, and pointers to those structures are used exclusively by the queue. No macros
* are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
* in the queue must all be the same so the client can derive their structure pointer from the
- * entry pointer returned by uds_funnel_queue_poll().
+ * entry pointer returned by vdo_funnel_queue_poll().
*
* Callers are wholly responsible for allocating and freeing the entries. Entries may be freed as
* soon as they are returned since this queue is not susceptible to the "ABA problem" present in
* many lock-free data structures. The queue is dynamically allocated to ensure cache-line
* alignment, but no other dynamic allocation is used.
*
- * The algorithm is not actually 100% lock-free. There is a single point in uds_funnel_queue_put()
+ * The algorithm is not actually 100% lock-free. There is a single point in vdo_funnel_queue_put()
* at which a preempted producer will prevent the consumers from seeing items added to the queue by
* later producers, and only if the queue is short enough or the consumer fast enough for it to
* reach what was the end of the queue at the time of the preemption.
*
- * The consumer function, uds_funnel_queue_poll(), will return NULL when the queue is empty. To
+ * The consumer function, vdo_funnel_queue_poll(), will return NULL when the queue is empty. To
* wait for data to consume, spin (if safe) or combine the queue with a struct event_count to
* signal the presence of new entries.
*/
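
To make the embedding requirement above concrete, here is a minimal consumer-side sketch against the renamed API. The struct vdo_work_item record is a hypothetical name invented for illustration; only the funnel-queue calls come from this header.

#include <linux/container_of.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "funnel-queue.h"

/* Hypothetical client record; the queue only ever touches the embedded entry. */
struct vdo_work_item {
	struct funnel_queue_entry queue_entry;
	int payload;
};

/* Drain whatever the consumer can currently see on the queue. */
static void drain_work(struct funnel_queue *queue)
{
	struct funnel_queue_entry *entry;

	while ((entry = vdo_funnel_queue_poll(queue)) != NULL) {
		/*
		 * Every record keeps its funnel_queue_entry at the same offset,
		 * so container_of() recovers the enclosing structure.
		 */
		struct vdo_work_item *item =
			container_of(entry, struct vdo_work_item, queue_entry);

		pr_info("draining item with payload %d\n", item->payload);
		kfree(item); /* entries may be freed as soon as they are returned */
	}
}

When the queue is empty this loop simply returns; a real consumer would spin (if safe) or pair the queue with an event count, as the comment above suggests.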
@@ -51,7 +51,7 @@ struct funnel_queue_entry {
/*
* The dynamically allocated queue structure, which is allocated on a cache line boundary so the
* producer and consumer fields in the structure will land on separate cache lines. This should be
- * consider opaque but it is exposed here so uds_funnel_queue_put() can be inlined.
+ * considered opaque, but it is exposed here so vdo_funnel_queue_put() can be inlined.
*/
struct __aligned(L1_CACHE_BYTES) funnel_queue {
/*
@@ -67,9 +67,9 @@ struct __aligned(L1_CACHE_BYTES) funnel_queue {
struct funnel_queue_entry stub;
};
-int __must_check uds_make_funnel_queue(struct funnel_queue **queue_ptr);
+int __must_check vdo_make_funnel_queue(struct funnel_queue **queue_ptr);
-void uds_free_funnel_queue(struct funnel_queue *queue);
+void vdo_free_funnel_queue(struct funnel_queue *queue);
/*
* Put an entry on the end of the queue.
@@ -79,7 +79,7 @@ void uds_free_funnel_queue(struct funnel_queue *queue);
* from the pointer that passed in here, so every entry in the queue must have the struct
* funnel_queue_entry at the same offset within the client's structure.
*/
-static inline void uds_funnel_queue_put(struct funnel_queue *queue,
+static inline void vdo_funnel_queue_put(struct funnel_queue *queue,
struct funnel_queue_entry *entry)
{
struct funnel_queue_entry *previous;
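
A matching producer-side sketch, reusing the hypothetical struct vdo_work_item from the consumer sketch above; real dm-vdo callers manage their own entry allocation, so the kmalloc() here is purely illustrative.

#include <linux/slab.h>

#include "funnel-queue.h"

/* Uses struct vdo_work_item as defined in the consumer sketch above. */
static int queue_work_item(struct funnel_queue *queue, int payload)
{
	struct vdo_work_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

	if (item == NULL)
		return -ENOMEM;

	item->payload = payload;
	/* Only the address of the embedded entry is handed to the queue. */
	vdo_funnel_queue_put(queue, &item->queue_entry);
	return 0;
}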
@@ -101,10 +101,10 @@ static inline void uds_funnel_queue_put(struct funnel_queue *queue,
WRITE_ONCE(previous->next, entry);
}
-struct funnel_queue_entry *__must_check uds_funnel_queue_poll(struct funnel_queue *queue);
+struct funnel_queue_entry *__must_check vdo_funnel_queue_poll(struct funnel_queue *queue);
-bool __must_check uds_is_funnel_queue_empty(struct funnel_queue *queue);
+bool __must_check vdo_is_funnel_queue_empty(struct funnel_queue *queue);
-bool __must_check uds_is_funnel_queue_idle(struct funnel_queue *queue);
+bool __must_check vdo_is_funnel_queue_idle(struct funnel_queue *queue);
-#endif /* UDS_FUNNEL_QUEUE_H */
+#endif /* VDO_FUNNEL_QUEUE_H */
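
Finally, a hedged setup and teardown sketch for the renamed constructors. It assumes the __must_check result follows the usual 0-on-success kernel convention and reuses the queue_work_item() and drain_work() helpers from the sketches above; the idle check before freeing is one plausible teardown pattern, not something this header mandates.

#include <linux/bug.h>

#include "funnel-queue.h"

static int funnel_queue_demo(void)
{
	struct funnel_queue *queue;
	int result = vdo_make_funnel_queue(&queue);

	if (result != 0)
		return result;

	result = queue_work_item(queue, 42);
	if (result == 0)
		drain_work(queue);

	/* Entries are caller-owned, so freeing the queue releases only the queue itself. */
	WARN_ON(!vdo_is_funnel_queue_idle(queue));
	vdo_free_funnel_queue(queue);
	return result;
}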