drivers/gpu/host1x/intr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include "dev.h"
#include "fence.h"
#include "intr.h"

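/*
 * Insert a fence into the syncpoint's fence list, keeping the list sorted by
 * threshold. The signed-difference comparison is wraparound-safe for 32-bit
 * syncpoint values.
 */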
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					  struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

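/*
 * Program the syncpoint interrupt threshold to the lowest pending fence, or
 * disable the interrupt if no fences are queued. Called with the syncpoint's
 * fence list lock held.
 */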
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

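/*
 * Queue a fence on its syncpoint and reprogram the hardware threshold.
 * The caller must hold the fence list lock (hence the _locked suffix).
 */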
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

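/*
 * Remove a fence from its syncpoint's list. Returns true if the fence was
 * still queued and has now been removed, false if it was no longer on the
 * list.
 */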
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

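/*
 * Signal every fence whose threshold has been reached by the syncpoint's
 * current value, then reprogram (or disable) the interrupt for the fences
 * that remain.
 */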
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

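/*
 * Initialize the per-syncpoint fence lists and request a handler for each
 * syncpoint interrupt line.
 */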
int host1x_intr_init(struct host1x *host)
{
	struct host1x_intr_irq_data *irq_data;
	unsigned int id;
	int i, err;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (i = 0; i < host->num_syncpt_irqs; i++) {
		irq_data[i].host = host;
		irq_data[i].offset = i;

		err = devm_request_irq(host->dev, host->syncpt_irqs[i],
				       host->intr_op->isr, IRQF_SHARED,
				       "host1x_syncpt", &irq_data[i]);
		if (err < 0)
			return err;
	}

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
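	/*
	 * Nothing to tear down: the interrupt handlers and per-IRQ data are
	 * device-managed and released automatically.
	 */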
}

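/*
 * Program the host sync hardware; DIV_ROUND_UP(hz, 1000000) expresses the
 * host1x clock rate in cycles per microsecond.
 */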
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}