// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-170.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v170
*
* Copyright (C) 2020-2021 Linaro Ltd.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss.h"
#include "camss-vfe.h"
#define VFE_HW_VERSION (0x000)
#define VFE_GLOBAL_RESET_CMD (0x018)
#define GLOBAL_RESET_CMD_CORE BIT(0)
#define GLOBAL_RESET_CMD_CAMIF BIT(1)
#define GLOBAL_RESET_CMD_BUS BIT(2)
#define GLOBAL_RESET_CMD_BUS_BDG BIT(3)
#define GLOBAL_RESET_CMD_REGISTER BIT(4)
#define GLOBAL_RESET_CMD_PM BIT(5)
#define GLOBAL_RESET_CMD_BUS_MISR BIT(6)
#define GLOBAL_RESET_CMD_TESTGEN BIT(7)
#define GLOBAL_RESET_CMD_DSP BIT(8)
#define GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
#define GLOBAL_RESET_CMD_RDI0 BIT(10)
#define GLOBAL_RESET_CMD_RDI1 BIT(11)
#define GLOBAL_RESET_CMD_RDI2 BIT(12)
#define GLOBAL_RESET_CMD_RDI3 BIT(13)
#define GLOBAL_RESET_CMD_VFE_DOMAIN BIT(30)
#define GLOBAL_RESET_CMD_RESET_BYPASS BIT(31)
#define VFE_CORE_CFG (0x050)
#define CFG_PIXEL_PATTERN_YCBYCR (0x4)
#define CFG_PIXEL_PATTERN_YCRYCB (0x5)
#define CFG_PIXEL_PATTERN_CBYCRY (0x6)
#define CFG_PIXEL_PATTERN_CRYCBY (0x7)
#define CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
#define VFE_IRQ_CMD (0x058)
#define CMD_GLOBAL_CLEAR BIT(0)
#define VFE_IRQ_MASK_0 (0x05c)
#define MASK_0_CAMIF_SOF BIT(0)
#define MASK_0_CAMIF_EOF BIT(1)
#define MASK_0_RDI_REG_UPDATE(n) BIT((n) + 5)
#define MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define MASK_0_RESET_ACK BIT(31)
#define VFE_IRQ_MASK_1 (0x060)
#define MASK_1_CAMIF_ERROR BIT(0)
#define MASK_1_VIOLATION BIT(7)
#define MASK_1_BUS_BDG_HALT_ACK BIT(8)
#define MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
#define MASK_1_RDI_SOF(n) BIT((n) + 29)
#define VFE_IRQ_CLEAR_0 (0x064)
#define VFE_IRQ_CLEAR_1 (0x068)
#define VFE_IRQ_STATUS_0 (0x06c)
#define STATUS_0_CAMIF_SOF BIT(0)
#define STATUS_0_RDI_REG_UPDATE(n) BIT((n) + 5)
#define STATUS_0_IMAGE_MASTER_PING_PONG(n) BIT((n) + 8)
#define STATUS_0_IMAGE_COMPOSITE_DONE(n) BIT((n) + 25)
#define STATUS_0_RESET_ACK BIT(31)
#define VFE_IRQ_STATUS_1 (0x070)
#define STATUS_1_VIOLATION BIT(7)
#define STATUS_1_BUS_BDG_HALT_ACK BIT(8)
#define STATUS_1_RDI_SOF(n) BIT((n) + 27)
#define VFE_VIOLATION_STATUS (0x07c)
#define VFE_CAMIF_CMD (0x478)
#define CMD_CLEAR_CAMIF_STATUS BIT(2)
#define VFE_CAMIF_CFG (0x47c)
#define CFG_VSYNC_SYNC_EDGE (0)
#define VSYNC_ACTIVE_HIGH (0)
#define VSYNC_ACTIVE_LOW (1)
#define CFG_HSYNC_SYNC_EDGE (1)
#define HSYNC_ACTIVE_HIGH (0)
#define HSYNC_ACTIVE_LOW (1)
#define CFG_VFE_SUBSAMPLE_ENABLE BIT(4)
#define CFG_BUS_SUBSAMPLE_ENABLE BIT(5)
#define CFG_VFE_OUTPUT_EN BIT(6)
#define CFG_BUS_OUTPUT_EN BIT(7)
#define CFG_BINNING_EN BIT(9)
#define CFG_FRAME_BASED_EN BIT(10)
#define CFG_RAW_CROP_EN BIT(22)
#define VFE_REG_UPDATE_CMD (0x4ac)
#define REG_UPDATE_RDI(n) BIT(1 + (n))
#define VFE_BUS_IRQ_MASK(n) (0x2044 + (n) * 4)
#define VFE_BUS_IRQ_CLEAR(n) (0x2050 + (n) * 4)
#define VFE_BUS_IRQ_STATUS(n) (0x205c + (n) * 4)
#define STATUS0_COMP_RESET_DONE BIT(0)
#define STATUS0_COMP_REG_UPDATE0_DONE BIT(1)
#define STATUS0_COMP_REG_UPDATE1_DONE BIT(2)
#define STATUS0_COMP_REG_UPDATE2_DONE BIT(3)
#define STATUS0_COMP_REG_UPDATE3_DONE BIT(4)
#define STATUS0_COMP_REG_UPDATE_DONE(n) BIT((n) + 1)
#define STATUS0_COMP0_BUF_DONE BIT(5)
#define STATUS0_COMP1_BUF_DONE BIT(6)
#define STATUS0_COMP2_BUF_DONE BIT(7)
#define STATUS0_COMP3_BUF_DONE BIT(8)
#define STATUS0_COMP4_BUF_DONE BIT(9)
#define STATUS0_COMP5_BUF_DONE BIT(10)
#define STATUS0_COMP_BUF_DONE(n) BIT((n) + 5)
#define STATUS0_COMP_ERROR BIT(11)
#define STATUS0_COMP_OVERWRITE BIT(12)
#define STATUS0_OVERFLOW BIT(13)
#define STATUS0_VIOLATION BIT(14)
/* WM_CLIENT_BUF_DONE defined for buffers 0:19 */
#define STATUS1_WM_CLIENT_BUF_DONE(n) BIT(n)
#define STATUS1_EARLY_DONE BIT(24)
#define STATUS2_DUAL_COMP0_BUF_DONE BIT(0)
#define STATUS2_DUAL_COMP1_BUF_DONE BIT(1)
#define STATUS2_DUAL_COMP2_BUF_DONE BIT(2)
#define STATUS2_DUAL_COMP3_BUF_DONE BIT(3)
#define STATUS2_DUAL_COMP4_BUF_DONE BIT(4)
#define STATUS2_DUAL_COMP5_BUF_DONE BIT(5)
#define STATUS2_DUAL_COMP_BUF_DONE(n) BIT(n)
#define STATUS2_DUAL_COMP_ERROR BIT(6)
#define STATUS2_DUAL_COMP_OVERWRITE BIT(7)
#define VFE_BUS_IRQ_CLEAR_GLOBAL (0x2068)
#define VFE_BUS_WM_DEBUG_STATUS_CFG (0x226c)
#define DEBUG_STATUS_CFG_STATUS0(n) BIT(n)
#define DEBUG_STATUS_CFG_STATUS1(n) BIT(8 + (n))
#define VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER (0x2080)
#define VFE_BUS_WM_ADDR_SYNC_NO_SYNC (0x2084)
#define BUS_VER2_MAX_CLIENTS (24)
#define WM_ADDR_NO_SYNC_DEFAULT_VAL \
((1 << BUS_VER2_MAX_CLIENTS) - 1)
#define VFE_BUS_WM_CGC_OVERRIDE (0x200c)
#define WM_CGC_OVERRIDE_ALL (0xFFFFF)
#define VFE_BUS_WM_TEST_BUS_CTRL (0x211c)
#define VFE_BUS_WM_STATUS0(n) (0x2200 + (n) * 0x100)
#define VFE_BUS_WM_STATUS1(n) (0x2204 + (n) * 0x100)
#define VFE_BUS_WM_CFG(n) (0x2208 + (n) * 0x100)
#define WM_CFG_EN (0)
#define WM_CFG_MODE (1)
#define MODE_QCOM_PLAIN (0)
#define MODE_MIPI_RAW (1)
#define WM_CFG_VIRTUALFRAME (2)
#define VFE_BUS_WM_HEADER_ADDR(n) (0x220c + (n) * 0x100)
#define VFE_BUS_WM_HEADER_CFG(n) (0x2210 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_ADDR(n) (0x2214 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_ADDR_OFFSET(n) (0x2218 + (n) * 0x100)
#define VFE_BUS_WM_BUFFER_WIDTH_CFG(n) (0x221c + (n) * 0x100)
#define WM_BUFFER_DEFAULT_WIDTH (0xFF01)
#define VFE_BUS_WM_BUFFER_HEIGHT_CFG(n) (0x2220 + (n) * 0x100)
#define VFE_BUS_WM_PACKER_CFG(n) (0x2224 + (n) * 0x100)
#define VFE_BUS_WM_STRIDE(n) (0x2228 + (n) * 0x100)
#define WM_STRIDE_DEFAULT_STRIDE (0xFF01)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (0x2248 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (0x224c + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (0x2250 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (0x2254 + (n) * 0x100)
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
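/*
 * vfe_hw_version - Read and log the VFE hardware version
 * @vfe: VFE device
 *
 * Return the raw contents of the hardware version register
 */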
static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
u32 gen = (hw_version >> 28) & 0xF;
u32 rev = (hw_version >> 16) & 0xFFF;
u32 step = hw_version & 0xFFFF;
dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
gen, rev, step);
return hw_version;
}
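/*
 * vfe_reg_set - Set bits in a VFE register (read-modify-write)
 * @vfe: VFE device
 * @reg: Register offset
 * @set_bits: Bits to set
 */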
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits | set_bits, vfe->base + reg);
}
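/*
 * vfe_global_reset - Trigger global reset of the VFE
 * @vfe: VFE device
 *
 * Only the reset-ack interrupt is left enabled in IRQ mask 0 before the
 * reset bits are written; completion is reported via the RESET_ACK
 * interrupt.
 */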
static void vfe_global_reset(struct vfe_device *vfe)
{
u32 reset_bits = GLOBAL_RESET_CMD_CORE |
GLOBAL_RESET_CMD_CAMIF |
GLOBAL_RESET_CMD_BUS |
GLOBAL_RESET_CMD_BUS_BDG |
GLOBAL_RESET_CMD_REGISTER |
GLOBAL_RESET_CMD_TESTGEN |
GLOBAL_RESET_CMD_DSP |
GLOBAL_RESET_CMD_IDLE_CGC |
GLOBAL_RESET_CMD_RDI0 |
GLOBAL_RESET_CMD_RDI1 |
GLOBAL_RESET_CMD_RDI2;
writel_relaxed(MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK_0);
/* Make sure IRQ mask has been written before resetting */
wmb();
writel_relaxed(reset_bits, vfe->base + VFE_GLOBAL_RESET_CMD);
}
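/*
 * vfe_wm_start - Configure and start an image write master
 * @vfe: VFE device
 * @wm: Write master id
 * @line: VFE line
 *
 * The write master is programmed with default width, stride and burst
 * limit and enabled in MIPI RAW mode.
 */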
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
u32 val;
/* Set Debug Registers */
val = DEBUG_STATUS_CFG_STATUS0(1) |
DEBUG_STATUS_CFG_STATUS0(7);
writel_relaxed(val, vfe->base + VFE_BUS_WM_DEBUG_STATUS_CFG);
/* BUS_WM_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
writel_relaxed(0, vfe->base + VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER);
/* no clock gating at bus input */
val = WM_CGC_OVERRIDE_ALL;
writel_relaxed(val, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);
writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
/* configure the addr no sync register with its default value */
val = WM_ADDR_NO_SYNC_DEFAULT_VAL;
writel_relaxed(val, vfe->base + VFE_BUS_WM_ADDR_SYNC_NO_SYNC);
writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));
val = WM_BUFFER_DEFAULT_WIDTH;
writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_WIDTH_CFG(wm));
val = 0;
writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_HEIGHT_CFG(wm));
val = 0;
writel_relaxed(val, vfe->base + VFE_BUS_WM_PACKER_CFG(wm)); /* XXX 1 for PLAIN8? */
/* Configure stride for RDIs */
val = WM_STRIDE_DEFAULT_STRIDE;
writel_relaxed(val, vfe->base + VFE_BUS_WM_STRIDE(wm));
/* Enable WM */
val = 1 << WM_CFG_EN |
MODE_MIPI_RAW << WM_CFG_MODE;
writel_relaxed(val, vfe->base + VFE_BUS_WM_CFG(wm));
}
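/*
 * vfe_wm_stop - Disable an image write master
 * @vfe: VFE device
 * @wm: Write master id
 */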
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
/* Disable WM */
writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}
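/*
 * vfe_wm_update - Queue a new buffer address on a write master
 * @vfe: VFE device
 * @wm: Write master id
 * @addr: DMA address of the buffer
 * @line: VFE line
 *
 * Program the image address and the per-frame increment (stride * height)
 * for the write master.
 */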
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
struct vfe_line *line)
{
struct v4l2_pix_format_mplane *pix =
&line->video_out.active_fmt.fmt.pix_mp;
u32 stride = pix->plane_fmt[0].bytesperline;
writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
writel_relaxed(stride * pix->height, vfe->base + VFE_BUS_WM_FRAME_INC(wm));
}
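/*
 * vfe_reg_update - Request a register update for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 *
 * Set the line's bit in the cached reg update mask and write the mask to
 * the REG_UPDATE command register.
 */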
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
vfe->reg_update |= REG_UPDATE_RDI(line_id);
/* Enforce ordering between previous reg writes and reg update */
wmb();
writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);
/* Enforce ordering between reg update and subsequent reg writes */
wmb();
}
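/*
 * vfe_reg_update_clear - Clear the cached reg update bit for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 */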
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
enum vfe_line_id line_id)
{
vfe->reg_update &= ~REG_UPDATE_RDI(line_id);
}
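/*
 * vfe_enable_irq_common - Enable VFE and bus interrupts
 * @vfe: VFE device
 */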
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
vfe_reg_set(vfe, VFE_IRQ_MASK_0, ~0u);
vfe_reg_set(vfe, VFE_IRQ_MASK_1, ~0u);
writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(0));
writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(1));
writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(2));
}
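/*
 * vfe_isr_halt_ack - Process halt ack interrupt
 * @vfe: VFE device
 */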
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
complete(&vfe->halt_complete);
}
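/*
 * vfe_isr_read - Read and clear the top level interrupt status
 * @vfe: VFE device
 * @status0: Output value of the IRQ STATUS 0 register
 * @status1: Output value of the IRQ STATUS 1 register
 */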
static void vfe_isr_read(struct vfe_device *vfe, u32 *status0, u32 *status1)
{
*status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
*status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);
writel_relaxed(*status0, vfe->base + VFE_IRQ_CLEAR_0);
writel_relaxed(*status1, vfe->base + VFE_IRQ_CLEAR_1);
/* Enforce ordering between IRQ Clear and Global IRQ Clear */
wmb();
writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
}
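/*
 * vfe_violation_read - Read and log the violation status register
 * @vfe: VFE device
 */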
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_VIOLATION_STATUS);
pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
}
/*
* vfe_isr - VFE module interrupt handler
* @irq: Interrupt line
* @dev: VFE device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
u32 status0, status1, vfe_bus_status[3];
int i, wm;
status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);
writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0);
writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) {
vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i));
writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i));
}
/* Enforce ordering between IRQ reading and interpretation */
wmb();
writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);
if (status0 & STATUS_0_RESET_ACK)
vfe->isr_ops.reset_ack(vfe);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (status0 & STATUS_0_RDI_REG_UPDATE(i))
vfe->isr_ops.reg_update(vfe, i);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (status0 & STATUS_1_RDI_SOF(i))
vfe->isr_ops.sof(vfe, i);
for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
if (vfe_bus_status[0] & STATUS0_COMP_BUF_DONE(i))
vfe->isr_ops.comp_done(vfe, i);
for (wm = 0; wm < MSM_VFE_IMAGE_MASTERS_NUM; wm++)
if (status0 & BIT(9))
if (vfe_bus_status[1] & STATUS1_WM_CLIENT_BUF_DONE(wm))
vfe->isr_ops.wm_done(vfe, wm);
return IRQ_HANDLED;
}
/*
 * vfe_halt - Halt the VFE module
 * @vfe: VFE device
 *
 * Currently a no-op; vfe_disable_output() is relied upon to stop the VFE.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_halt(struct vfe_device *vfe)
{
/* rely on vfe_disable_output() to stop the VFE */
return 0;
}
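/*
 * vfe_get_output - Reserve a write master for the line output
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */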
static int vfe_get_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
int wm_idx;
spin_lock_irqsave(&vfe->output_lock, flags);
output = &line->output;
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is running\n");
goto error;
}
output->wm_num = 1;
wm_idx = vfe_reserve_wm(vfe, line->id);
if (wm_idx < 0) {
dev_err(vfe->camss->dev, "Can not reserve wm\n");
goto error_get_wm;
}
output->wm_idx[0] = wm_idx;
output->drop_update_idx = 0;
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
error_get_wm:
vfe_release_wm(vfe, output->wm_idx[0]);
output->state = VFE_OUTPUT_OFF;
error:
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
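/*
 * vfe_enable_output - Start the write master and prime it with buffers
 * @line: VFE line
 *
 * Up to two pending buffers are handed to the hardware before the register
 * update is requested.
 *
 * Return 0 on success or a negative error code otherwise
 */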
static int vfe_enable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->ops;
struct media_entity *sensor;
unsigned long flags;
unsigned int frame_skip = 0;
unsigned int i;
sensor = camss_find_sensor(&line->subdev.entity);
if (sensor) {
struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
/* Max frame skip is 29 frames */
if (frame_skip > VFE_FRAME_DROP_VAL - 1)
frame_skip = VFE_FRAME_DROP_VAL - 1;
}
spin_lock_irqsave(&vfe->output_lock, flags);
ops->reg_update_clear(vfe, line->id);
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
output->state);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
WARN_ON(output->gen2.active_num);
output->state = VFE_OUTPUT_ON;
output->sequence = 0;
output->wait_reg_update = 0;
reinit_completion(&output->reg_update);
vfe_wm_start(vfe, output->wm_idx[0], line);
for (i = 0; i < 2; i++) {
output->buf[i] = vfe_buf_get_pending(output);
if (!output->buf[i])
break;
output->gen2.active_num++;
vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
}
ops->reg_update(vfe, line->id);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
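/*
 * vfe_disable_output - Wait for the output to idle and stop its write masters
 * @line: VFE line
 *
 * If the output does not become idle within the timeout, the VFE is reset.
 *
 * Return 0
 */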
static int vfe_disable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
unsigned long flags;
unsigned int i;
bool done;
int timeout = 0;
do {
spin_lock_irqsave(&vfe->output_lock, flags);
done = !output->gen2.active_num;
spin_unlock_irqrestore(&vfe->output_lock, flags);
usleep_range(10000, 20000);
if (timeout++ == 100) {
dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
vfe_reset(vfe);
output->gen2.active_num = 0;
return 0;
}
} while (!done);
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
vfe_wm_stop(vfe, output->wm_idx[i]);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_enable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
int ret;
mutex_lock(&vfe->stream_lock);
if (!vfe->stream_count)
vfe_enable_irq_common(vfe);
vfe->stream_count++;
mutex_unlock(&vfe->stream_lock);
ret = vfe_get_output(line);
if (ret < 0)
goto error_get_output;
ret = vfe_enable_output(line);
if (ret < 0)
goto error_enable_output;
vfe->was_streaming = 1;
return 0;
error_enable_output:
vfe_put_output(line);
error_get_output:
mutex_lock(&vfe->stream_lock);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return ret;
}
/*
* vfe_disable - Disable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_disable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
vfe_disable_output(line);
vfe_put_output(line);
mutex_lock(&vfe->stream_lock);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return 0;
}
/*
* vfe_isr_sof - Process start of frame interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
/* nop */
}
/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
struct vfe_output *output;
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe->ops->reg_update_clear(vfe, line_id);
output = &vfe->line[line_id].output;
if (output->wait_reg_update) {
output->wait_reg_update = 0;
complete(&output->reg_update);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_isr_wm_done - Process write master done interrupt
* @vfe: VFE Device
* @wm: Write master id
*/
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
struct camss_buffer *ready_buf;
struct vfe_output *output;
unsigned long flags;
u32 index;
u64 ts = ktime_get_ns();
spin_lock_irqsave(&vfe->output_lock, flags);
if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
dev_err_ratelimited(vfe->camss->dev,
"Received wm done for unmapped index\n");
goto out_unlock;
}
output = &vfe->line[vfe->wm_output_map[wm]].output;
ready_buf = output->buf[0];
if (!ready_buf) {
dev_err_ratelimited(vfe->camss->dev,
"Missing ready buf %d!\n", output->state);
goto out_unlock;
}
ready_buf->vb.vb2_buf.timestamp = ts;
ready_buf->vb.sequence = output->sequence++;
index = 0;
output->buf[0] = output->buf[1];
if (output->buf[0])
index = 1;
output->buf[index] = vfe_buf_get_pending(output);
if (output->buf[index])
vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
else
output->gen2.active_num--;
spin_unlock_irqrestore(&vfe->output_lock, flags);
vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
return;
out_unlock:
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
if (vfe->id >= camss->vfe_num)
return;
device_link_del(camss->genpd_link[vfe->id]);
}
/*
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
if (id >= camss->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id])
return -EINVAL;
return 0;
}
/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
*
* Add an empty buffer - depending on the current number of buffers it will be
* put in pending buffer queue or directly given to the hardware to be filled.
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_queue_buffer(struct camss_video *vid,
struct camss_buffer *buf)
{
struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
output->buf[output->gen2.active_num++] = buf;
vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
} else {
vfe_buf_add_pending(output, buf);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
static const struct vfe_isr_ops vfe_isr_ops_170 = {
.reset_ack = vfe_isr_reset_ack,
.halt_ack = vfe_isr_halt_ack,
.reg_update = vfe_isr_reg_update,
.sof = vfe_isr_sof,
.comp_done = vfe_isr_comp_done,
.wm_done = vfe_isr_wm_done,
};
static const struct camss_video_ops vfe_video_ops_170 = {
.queue_buffer = vfe_queue_buffer,
.flush_buffers = vfe_flush_buffers,
};
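/*
 * vfe_subdev_init - Initialize VFE device with VFE 170 specific data
 * @dev: Device
 * @vfe: VFE device
 */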
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->isr_ops = vfe_isr_ops_170;
vfe->video_ops = vfe_video_ops_170;
vfe->line_num = VFE_LINE_NUM_GEN2;
}
const struct vfe_hw_ops vfe_ops_170 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.reg_update_clear = vfe_reg_update_clear,
.reg_update = vfe_reg_update,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_disable,
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
.violation_read = vfe_violation_read,
};