// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DS90UB960-Q1 video deserializer
*
* Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
* Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
*/
/*
* (Possible) TODOs:
*
* - PM for serializer and remote peripherals. We need to manage:
* - VPOC
* - Power domain? Regulator? Somehow any remote device should be able to
* cause the VPOC to be turned on.
* - Link between the deserializer and the serializer
* - Related to VPOC management. We probably always want to turn on the VPOC
* and then enable the link.
* - Serializer's services: i2c, gpios, power
* - The serializer needs to resume before the remote peripherals can
* e.g. use the i2c.
* - How to handle gpios? Reserving a gpio essentially keeps the provider
* (serializer) always powered on.
* - Do we need a new bus for the FPD-Link? At the moment the serializers
* are children of the same i2c-adapter where the deserializer resides.
* - i2c-atr could be made embeddable instead of allocatable.
*/
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/units.h>
#include <linux/workqueue.h>
#include <media/i2c/ds90ub9xx.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "ds90ub953.h"
#define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
/*
* If this is defined, the i2c addresses from UB960_DEBUG_I2C_RX_ID to
* UB960_DEBUG_I2C_RX_ID + 3 can be used to access the paged RX port registers
* directly.
*
* Only for debug purposes.
*/
/* #define UB960_DEBUG_I2C_RX_ID 0x40 */
#define UB960_POLL_TIME_MS 500
#define UB960_MAX_RX_NPORTS 4
#define UB960_MAX_TX_NPORTS 2
#define UB960_MAX_NPORTS (UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS)
#define UB960_MAX_PORT_ALIASES 8
#define UB960_NUM_BC_GPIOS 4
/*
* Register map
*
* 0x00-0x32 Shared (UB960_SR)
* 0x33-0x3a CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
* 0x4c Shared (UB960_SR)
* 0x4d-0x7f FPD-Link RX, per-port paged (UB960_RR)
* 0xb0-0xbf Shared (UB960_SR)
* 0xd0-0xdf FPD-Link RX, per-port paged (UB960_RR)
* 0xf0-0xf5 Shared (UB960_SR)
* 0xf8-0xfb Shared (UB960_SR)
* All others Reserved
*
* Register prefixes:
* UB960_SR_* = Shared register
* UB960_RR_* = FPD-Link RX, per-port paged register
* UB960_TR_* = CSI-2 TX, per-port paged register
* UB960_XR_* = Reserved register
* UB960_IR_* = Indirect register
*/
#define UB960_SR_I2C_DEV_ID 0x00
#define UB960_SR_RESET 0x01
#define UB960_SR_RESET_DIGITAL_RESET1 BIT(1)
#define UB960_SR_RESET_DIGITAL_RESET0 BIT(0)
#define UB960_SR_RESET_GPIO_LOCK_RELEASE BIT(5)
#define UB960_SR_GEN_CONFIG 0x02
#define UB960_SR_REV_MASK 0x03
#define UB960_SR_DEVICE_STS 0x04
#define UB960_SR_PAR_ERR_THOLD_HI 0x05
#define UB960_SR_PAR_ERR_THOLD_LO 0x06
#define UB960_SR_BCC_WDOG_CTL 0x07
#define UB960_SR_I2C_CTL1 0x08
#define UB960_SR_I2C_CTL2 0x09
#define UB960_SR_SCL_HIGH_TIME 0x0a
#define UB960_SR_SCL_LOW_TIME 0x0b
#define UB960_SR_RX_PORT_CTL 0x0c
#define UB960_SR_IO_CTL 0x0d
#define UB960_SR_GPIO_PIN_STS 0x0e
#define UB960_SR_GPIO_INPUT_CTL 0x0f
#define UB960_SR_GPIO_PIN_CTL(n) (0x10 + (n)) /* n < UB960_NUM_GPIOS */
#define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SEL 5
#define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SRC_SHIFT 2
#define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_EN BIT(0)
#define UB960_SR_FS_CTL 0x18
#define UB960_SR_FS_HIGH_TIME_1 0x19
#define UB960_SR_FS_HIGH_TIME_0 0x1a
#define UB960_SR_FS_LOW_TIME_1 0x1b
#define UB960_SR_FS_LOW_TIME_0 0x1c
#define UB960_SR_MAX_FRM_HI 0x1d
#define UB960_SR_MAX_FRM_LO 0x1e
#define UB960_SR_CSI_PLL_CTL 0x1f
#define UB960_SR_FWD_CTL1 0x20
#define UB960_SR_FWD_CTL1_PORT_DIS(n) BIT((n) + 4)
#define UB960_SR_FWD_CTL2 0x21
#define UB960_SR_FWD_STS 0x22
#define UB960_SR_INTERRUPT_CTL 0x23
#define UB960_SR_INTERRUPT_CTL_INT_EN BIT(7)
#define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0 BIT(4)
#define UB960_SR_INTERRUPT_CTL_IE_RX(n) BIT((n)) /* rxport[n] IRQ */
#define UB960_SR_INTERRUPT_STS 0x24
#define UB960_SR_INTERRUPT_STS_INT BIT(7)
#define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n) BIT(4 + (n)) /* txport[n] IRQ */
#define UB960_SR_INTERRUPT_STS_IS_RX(n) BIT((n)) /* rxport[n] IRQ */
#define UB960_SR_TS_CONFIG 0x25
#define UB960_SR_TS_CONTROL 0x26
#define UB960_SR_TS_LINE_HI 0x27
#define UB960_SR_TS_LINE_LO 0x28
#define UB960_SR_TS_STATUS 0x29
#define UB960_SR_TIMESTAMP_P0_HI 0x2a
#define UB960_SR_TIMESTAMP_P0_LO 0x2b
#define UB960_SR_TIMESTAMP_P1_HI 0x2c
#define UB960_SR_TIMESTAMP_P1_LO 0x2d
#define UB960_SR_CSI_PORT_SEL 0x32
#define UB960_TR_CSI_CTL 0x33
#define UB960_TR_CSI_CTL_CSI_CAL_EN BIT(6)
#define UB960_TR_CSI_CTL_CSI_CONTS_CLOCK BIT(1)
#define UB960_TR_CSI_CTL_CSI_ENABLE BIT(0)
#define UB960_TR_CSI_CTL2 0x34
#define UB960_TR_CSI_STS 0x35
#define UB960_TR_CSI_TX_ICR 0x36
#define UB960_TR_CSI_TX_ISR 0x37
#define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR BIT(3)
#define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR BIT(1)
#define UB960_TR_CSI_TEST_CTL 0x38
#define UB960_TR_CSI_TEST_PATT_HI 0x39
#define UB960_TR_CSI_TEST_PATT_LO 0x3a
#define UB960_XR_SFILTER_CFG 0x41
#define UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT 4
#define UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT 0
#define UB960_XR_AEQ_CTL1 0x42
#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK BIT(6)
#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING BIT(5)
#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY BIT(4)
#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK \
(UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK | \
UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING | \
UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY)
#define UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN BIT(0)
#define UB960_XR_AEQ_ERR_THOLD 0x43
#define UB960_RR_BCC_ERR_CTL 0x46
#define UB960_RR_BCC_STATUS 0x47
#define UB960_RR_BCC_STATUS_SEQ_ERROR BIT(5)
#define UB960_RR_BCC_STATUS_MASTER_ERR BIT(4)
#define UB960_RR_BCC_STATUS_MASTER_TO BIT(3)
#define UB960_RR_BCC_STATUS_SLAVE_ERR BIT(2)
#define UB960_RR_BCC_STATUS_SLAVE_TO BIT(1)
#define UB960_RR_BCC_STATUS_RESP_ERR BIT(0)
#define UB960_RR_BCC_STATUS_ERROR_MASK \
(UB960_RR_BCC_STATUS_SEQ_ERROR | UB960_RR_BCC_STATUS_MASTER_ERR | \
UB960_RR_BCC_STATUS_MASTER_TO | UB960_RR_BCC_STATUS_SLAVE_ERR | \
UB960_RR_BCC_STATUS_SLAVE_TO | UB960_RR_BCC_STATUS_RESP_ERR)
#define UB960_RR_FPD3_CAP 0x4a
#define UB960_RR_RAW_EMBED_DTYPE 0x4b
#define UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT 6
#define UB960_SR_FPD3_PORT_SEL 0x4c
#define UB960_RR_RX_PORT_STS1 0x4d
#define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR BIT(5)
#define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG BIT(4)
#define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR BIT(3)
#define UB960_RR_RX_PORT_STS1_PARITY_ERROR BIT(2)
#define UB960_RR_RX_PORT_STS1_PORT_PASS BIT(1)
#define UB960_RR_RX_PORT_STS1_LOCK_STS BIT(0)
#define UB960_RR_RX_PORT_STS1_ERROR_MASK \
(UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR | \
UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR | \
UB960_RR_RX_PORT_STS1_PARITY_ERROR)
#define UB960_RR_RX_PORT_STS2 0x4e
#define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE BIT(7)
#define UB960_RR_RX_PORT_STS2_LINE_LEN_CHG BIT(6)
#define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR BIT(5)
#define UB960_RR_RX_PORT_STS2_BUFFER_ERROR BIT(4)
#define UB960_RR_RX_PORT_STS2_CSI_ERROR BIT(3)
#define UB960_RR_RX_PORT_STS2_FREQ_STABLE BIT(2)
#define UB960_RR_RX_PORT_STS2_CABLE_FAULT BIT(1)
#define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG BIT(0)
#define UB960_RR_RX_PORT_STS2_ERROR_MASK \
UB960_RR_RX_PORT_STS2_BUFFER_ERROR
#define UB960_RR_RX_FREQ_HIGH 0x4f
#define UB960_RR_RX_FREQ_LOW 0x50
#define UB960_RR_SENSOR_STS_0 0x51
#define UB960_RR_SENSOR_STS_1 0x52
#define UB960_RR_SENSOR_STS_2 0x53
#define UB960_RR_SENSOR_STS_3 0x54
#define UB960_RR_RX_PAR_ERR_HI 0x55
#define UB960_RR_RX_PAR_ERR_LO 0x56
#define UB960_RR_BIST_ERR_COUNT 0x57
#define UB960_RR_BCC_CONFIG 0x58
#define UB960_RR_BCC_CONFIG_BC_ALWAYS_ON BIT(4)
#define UB960_RR_BCC_CONFIG_AUTO_ACK_ALL BIT(5)
#define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH BIT(6)
#define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK GENMASK(2, 0)
#define UB960_RR_DATAPATH_CTL1 0x59
#define UB960_RR_DATAPATH_CTL2 0x5a
#define UB960_RR_SER_ID 0x5b
#define UB960_RR_SER_ID_FREEZE_DEVICE_ID BIT(0)
#define UB960_RR_SER_ALIAS_ID 0x5c
#define UB960_RR_SER_ALIAS_ID_AUTO_ACK BIT(0)
/* For these two register sets: n < UB960_MAX_PORT_ALIASES */
#define UB960_RR_SLAVE_ID(n) (0x5d + (n))
#define UB960_RR_SLAVE_ALIAS(n) (0x65 + (n))
#define UB960_RR_PORT_CONFIG 0x6d
#define UB960_RR_PORT_CONFIG_FPD3_MODE_MASK GENMASK(1, 0)
#define UB960_RR_BC_GPIO_CTL(n) (0x6e + (n)) /* n < 2 */
#define UB960_RR_RAW10_ID 0x70
#define UB960_RR_RAW10_ID_VC_SHIFT 6
#define UB960_RR_RAW10_ID_DT_SHIFT 0
#define UB960_RR_RAW12_ID 0x71
#define UB960_RR_CSI_VC_MAP 0x72
#define UB960_RR_CSI_VC_MAP_SHIFT(x) ((x) * 2)
#define UB960_RR_LINE_COUNT_HI 0x73
#define UB960_RR_LINE_COUNT_LO 0x74
#define UB960_RR_LINE_LEN_1 0x75
#define UB960_RR_LINE_LEN_0 0x76
#define UB960_RR_FREQ_DET_CTL 0x77
#define UB960_RR_MAILBOX_1 0x78
#define UB960_RR_MAILBOX_2 0x79
#define UB960_RR_CSI_RX_STS 0x7a
#define UB960_RR_CSI_RX_STS_LENGTH_ERR BIT(3)
#define UB960_RR_CSI_RX_STS_CKSUM_ERR BIT(2)
#define UB960_RR_CSI_RX_STS_ECC2_ERR BIT(1)
#define UB960_RR_CSI_RX_STS_ECC1_ERR BIT(0)
#define UB960_RR_CSI_RX_STS_ERROR_MASK \
(UB960_RR_CSI_RX_STS_LENGTH_ERR | UB960_RR_CSI_RX_STS_CKSUM_ERR | \
UB960_RR_CSI_RX_STS_ECC2_ERR | UB960_RR_CSI_RX_STS_ECC1_ERR)
#define UB960_RR_CSI_ERR_COUNTER 0x7b
#define UB960_RR_PORT_CONFIG2 0x7c
#define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK GENMASK(7, 6)
#define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT 6
#define UB960_RR_PORT_CONFIG2_LV_POL_LOW BIT(1)
#define UB960_RR_PORT_CONFIG2_FV_POL_LOW BIT(0)
#define UB960_RR_PORT_PASS_CTL 0x7d
#define UB960_RR_SEN_INT_RISE_CTL 0x7e
#define UB960_RR_SEN_INT_FALL_CTL 0x7f
#define UB960_SR_CSI_FRAME_COUNT_HI(n) (0x90 + 8 * (n))
#define UB960_SR_CSI_FRAME_COUNT_LO(n) (0x91 + 8 * (n))
#define UB960_SR_CSI_FRAME_ERR_COUNT_HI(n) (0x92 + 8 * (n))
#define UB960_SR_CSI_FRAME_ERR_COUNT_LO(n) (0x93 + 8 * (n))
#define UB960_SR_CSI_LINE_COUNT_HI(n) (0x94 + 8 * (n))
#define UB960_SR_CSI_LINE_COUNT_LO(n) (0x95 + 8 * (n))
#define UB960_SR_CSI_LINE_ERR_COUNT_HI(n) (0x96 + 8 * (n))
#define UB960_SR_CSI_LINE_ERR_COUNT_LO(n) (0x97 + 8 * (n))
#define UB960_XR_REFCLK_FREQ 0xa5 /* UB960 */
#define UB960_SR_IND_ACC_CTL 0xb0
#define UB960_SR_IND_ACC_CTL_IA_AUTO_INC BIT(1)
#define UB960_SR_IND_ACC_ADDR 0xb1
#define UB960_SR_IND_ACC_DATA 0xb2
#define UB960_SR_BIST_CONTROL 0xb3
#define UB960_SR_MODE_IDX_STS 0xb8
#define UB960_SR_LINK_ERROR_COUNT 0xb9
#define UB960_SR_FPD3_ENC_CTL 0xba
#define UB960_SR_FV_MIN_TIME 0xbc
#define UB960_SR_GPIO_PD_CTL 0xbe
#define UB960_RR_PORT_DEBUG 0xd0
#define UB960_RR_AEQ_CTL2 0xd2
#define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR BIT(2)
#define UB960_RR_AEQ_STATUS 0xd3
#define UB960_RR_AEQ_STATUS_STATUS_2 GENMASK(5, 3)
#define UB960_RR_AEQ_STATUS_STATUS_1 GENMASK(2, 0)
#define UB960_RR_AEQ_BYPASS 0xd4
#define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT 5
#define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK GENMASK(7, 5)
#define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT 1
#define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK GENMASK(3, 1)
#define UB960_RR_AEQ_BYPASS_ENABLE BIT(0)
#define UB960_RR_AEQ_MIN_MAX 0xd5
#define UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT 4
#define UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT 0
#define UB960_RR_SFILTER_STS_0 0xd6
#define UB960_RR_SFILTER_STS_1 0xd7
#define UB960_RR_PORT_ICR_HI 0xd8
#define UB960_RR_PORT_ICR_LO 0xd9
#define UB960_RR_PORT_ISR_HI 0xda
#define UB960_RR_PORT_ISR_LO 0xdb
#define UB960_RR_FC_GPIO_STS 0xdc
#define UB960_RR_FC_GPIO_ICR 0xdd
#define UB960_RR_SEN_INT_RISE_STS 0xde
#define UB960_RR_SEN_INT_FALL_STS 0xdf
#define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n))
#define UB960_SR_FPD3_RX_ID_LEN 6
#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n))
/* Indirect register blocks */
#define UB960_IND_TARGET_PAT_GEN 0x00
#define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n))
#define UB960_IND_TARGET_CSI_ANA 0x07
/* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
#define UB960_IR_PGEN_CTL 0x01
#define UB960_IR_PGEN_CTL_PGEN_ENABLE BIT(0)
#define UB960_IR_PGEN_CFG 0x02
#define UB960_IR_PGEN_CSI_DI 0x03
#define UB960_IR_PGEN_LINE_SIZE1 0x04
#define UB960_IR_PGEN_LINE_SIZE0 0x05
#define UB960_IR_PGEN_BAR_SIZE1 0x06
#define UB960_IR_PGEN_BAR_SIZE0 0x07
#define UB960_IR_PGEN_ACT_LPF1 0x08
#define UB960_IR_PGEN_ACT_LPF0 0x09
#define UB960_IR_PGEN_TOT_LPF1 0x0a
#define UB960_IR_PGEN_TOT_LPF0 0x0b
#define UB960_IR_PGEN_LINE_PD1 0x0c
#define UB960_IR_PGEN_LINE_PD0 0x0d
#define UB960_IR_PGEN_VBP 0x0e
#define UB960_IR_PGEN_VFP 0x0f
#define UB960_IR_PGEN_COLOR(n) (0x10 + (n)) /* n < 15 */
#define UB960_IR_RX_ANA_STROBE_SET_CLK 0x08
#define UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY BIT(3)
#define UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK GENMASK(2, 0)
#define UB960_IR_RX_ANA_STROBE_SET_DATA 0x09
#define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY BIT(3)
#define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK GENMASK(2, 0)
/* UB9702 Registers */
#define UB9702_SR_CSI_EXCLUSIVE_FWD2 0x3c
#define UB9702_SR_REFCLK_FREQ 0x3d
#define UB9702_RR_RX_CTL_1 0x80
#define UB9702_RR_RX_CTL_2 0x87
#define UB9702_RR_VC_ID_MAP(x) (0xa0 + (x))
#define UB9702_SR_FPD_RATE_CFG 0xc2
#define UB9702_SR_CSI_PLL_DIV 0xc9
#define UB9702_RR_RX_SM_SEL_2 0xd4
#define UB9702_RR_CHANNEL_MODE 0xe4
#define UB9702_IND_TARGET_SAR_ADC 0x0a
#define UB9702_IR_RX_ANA_FPD_BC_CTL0 0x04
#define UB9702_IR_RX_ANA_FPD_BC_CTL1 0x0d
#define UB9702_IR_RX_ANA_FPD_BC_CTL2 0x1b
#define UB9702_IR_RX_ANA_SYSTEM_INIT_REG0 0x21
#define UB9702_IR_RX_ANA_AEQ_ALP_SEL6 0x27
#define UB9702_IR_RX_ANA_AEQ_ALP_SEL7 0x28
#define UB9702_IR_RX_ANA_AEQ_ALP_SEL10 0x2b
#define UB9702_IR_RX_ANA_AEQ_ALP_SEL11 0x2c
#define UB9702_IR_RX_ANA_EQ_ADAPT_CTRL 0x2e
#define UB9702_IR_RX_ANA_AEQ_CFG_1 0x34
#define UB9702_IR_RX_ANA_AEQ_CFG_2 0x4d
/* Was accidentally defined twice with the same value; keep a single copy. */
#define UB9702_IR_RX_ANA_GAIN_CTRL_0		0x71
#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_1 0x72
#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_2 0x73
#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_3 0x74
#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_6 0x77
#define UB9702_IR_RX_ANA_AEQ_CFG_3 0x79
#define UB9702_IR_RX_ANA_AEQ_CFG_4 0x85
#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_15 0x87
#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_24 0x90
#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_38 0x9e
#define UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5 0xa5
#define UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1 0xa8
#define UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL 0xf0
#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_8 0xf1
#define UB9702_IR_CSI_ANA_CSIPLL_REG_1 0x92
/* EQ related */
#define UB960_MIN_AEQ_STROBE_POS -7
#define UB960_MAX_AEQ_STROBE_POS 7
#define UB960_MANUAL_STROBE_EXTRA_DELAY 6
#define UB960_MIN_MANUAL_STROBE_POS -(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
#define UB960_MAX_MANUAL_STROBE_POS (7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
#define UB960_NUM_MANUAL_STROBE_POS (UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)
#define UB960_MIN_EQ_LEVEL 0
#define UB960_MAX_EQ_LEVEL 14
#define UB960_NUM_EQ_LEVELS (UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)
/* Static per-model capabilities of the deserializer. */
struct ub960_hw_data {
	const char *model;	/* Model name, for logging */
	u8 num_rxports;		/* Number of FPD-Link RX (input) ports */
	u8 num_txports;		/* Number of CSI-2 TX (output) ports */
	bool is_ub9702;		/* Device uses the UB9702 register layout */
	bool is_fpdlink4;	/* Device supports FPD-Link IV inputs */
};
/*
 * FPD-Link RX port input mode. The values are used as-is in configuration,
 * so they must not be renumbered.
 */
enum ub960_rxport_mode {
	RXPORT_MODE_RAW10 = 0,
	RXPORT_MODE_RAW12_HF = 1,
	RXPORT_MODE_RAW12_LF = 2,
	RXPORT_MODE_CSI2_SYNC = 3,
	RXPORT_MODE_CSI2_NONSYNC = 4,
	RXPORT_MODE_LAST = RXPORT_MODE_CSI2_NONSYNC,	/* For range checks */
};
/* Clock/data-recovery mode of an RX port: FPD-Link III vs FPD-Link IV. */
enum ub960_rxport_cdr {
	RXPORT_CDR_FPD3 = 0,
	RXPORT_CDR_FPD4 = 1,
	RXPORT_CDR_LAST = RXPORT_CDR_FPD4,	/* For range checks */
};
/*
 * Per-RX-port state. Only populated ports have an entry in priv->rxports[];
 * unused slots are NULL (see UB960_ITER_ACTIVE_ONLY).
 */
struct ub960_rxport {
	struct ub960_data *priv;
	u8 nport;		/* RX port number, and index in priv->rxport[] */

	/* Remote video source feeding this port */
	struct {
		struct v4l2_subdev *sd;		/* Source subdev */
		u16 pad;			/* Source pad index */
		struct fwnode_handle *ep_fwnode; /* Source endpoint fwnode */
	} source;

	/* Serializer */
	struct {
		struct fwnode_handle *fwnode;
		struct i2c_client *client;
		unsigned short alias; /* I2C alias (lower 7 bits) */
		short addr; /* Local I2C address (lower 7 bits) */
		struct ds90ub9xx_platform_data pdata;
		struct regmap *regmap;
	} ser;

	enum ub960_rxport_mode rx_mode;		/* Input data format mode */
	enum ub960_rxport_cdr cdr_mode;		/* FPD-Link III or IV CDR */

	u8 lv_fv_pol;	/* LV and FV polarities */

	struct regulator *vpoc;	/* Power-over-coax supply for the remote end */

	/* EQ settings */
	struct {
		bool manual_eq;	/* true: fixed EQ level, false: adaptive EQ */

		s8 strobe_pos;

		union {
			/* Adaptive EQ limits (manual_eq == false) */
			struct {
				u8 eq_level_min;
				u8 eq_level_max;
			} aeq;

			/* Fixed EQ level (manual_eq == true) */
			struct {
				u8 eq_level;
			} manual;
		};
	} eq;

	/* lock for aliased_addrs and associated registers */
	struct mutex aliased_addrs_lock;
	/* I2C addresses mapped via the ATR; 0 marks a free alias slot */
	u16 aliased_addrs[UB960_MAX_PORT_ALIASES];
};
/* v4l2 async connection wrapper linking a connection to its RX port. */
struct ub960_asd {
	struct v4l2_async_connection base;
	struct ub960_rxport *rxport;
};
/* Convert a generic async connection back to the embedding ub960_asd. */
static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_connection *asd)
{
	return container_of(asd, struct ub960_asd, base);
}
/* Per-CSI-2-TX-port state. */
struct ub960_txport {
	struct ub960_data *priv;
	u8 nport;		/* TX port number, and index in priv->txport[] */

	u32 num_data_lanes;	/* CSI-2 data lane count from the endpoint */
	/* Non-continuous clock mode (field name typo kept for compatibility) */
	bool non_continous_clk;
};
/* Main driver state, one instance per deserializer. */
struct ub960_data {
	const struct ub960_hw_data *hw_data;	/* Model capabilities */
	struct i2c_client *client; /* for shared local registers */
	struct regmap *regmap;

	/* lock for register access */
	struct mutex reg_lock;

	struct clk *refclk;		/* Reference clock input */

	struct regulator *vddio;	/* I/O supply */

	struct gpio_desc *pd_gpio;	/* Power-down GPIO */
	struct delayed_work poll_work;	/* Periodic status/error polling */
	/* Port state; NULL entries are unconnected ports */
	struct ub960_rxport *rxports[UB960_MAX_RX_NPORTS];
	struct ub960_txport *txports[UB960_MAX_TX_NPORTS];

	struct v4l2_subdev sd;
	/* Sink pads (RX ports) first, then source pads (TX ports) */
	struct media_pad pads[UB960_MAX_NPORTS];

	struct v4l2_ctrl_handler ctrl_handler;
	struct v4l2_async_notifier notifier;

	u32 tx_data_rate;		/* Nominal data rate (Gb/s) */
	s64 tx_link_freq[1];

	struct i2c_atr *atr;		/* I2C address translator */

	/*
	 * Cache of the currently selected paged-register targets, to avoid
	 * redundant *_PORT_SEL / IND_ACC_CTL writes. Protected by reg_lock.
	 */
	struct {
		u8 rxport;
		u8 txport;
		u8 indirect_target;
	} reg_current;

	bool streaming;

	u8 stored_fwd_ctl;	/* FWD_CTL1 saved while streaming */

	/* Per-pad bitmask of enabled streams */
	u64 stream_enable_mask[UB960_MAX_NPORTS];

	/* These are common to all ports */
	struct {
		bool manual;	/* true: manual strobe position */
		s8 min;		/* Min strobe position */
		s8 max;		/* Max strobe position */
	} strobe;
};
/* Convert an embedded v4l2_subdev back to the driver state. */
static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ub960_data, sd);
}
/*
 * Sink pads map to the RX ports and occupy the first num_rxports pad
 * indices; everything after them is a source (TX) pad.
 */
static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
{
	u32 num_sink_pads = priv->hw_data->num_rxports;

	return pad < num_sink_pads;
}
/* A pad is a source (TX) pad exactly when it is not a sink (RX) pad. */
static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
{
	return !ub960_pad_is_sink(priv, pad);
}
/*
 * Map a media pad index to its port number: sink pads map 1:1 to RX ports,
 * source pads map to TX ports after subtracting the RX pad count.
 */
static inline unsigned int ub960_pad_to_port(struct ub960_data *priv, u32 pad)
{
	unsigned int num_sink_pads = priv->hw_data->num_rxports;

	return ub960_pad_is_sink(priv, pad) ? pad : pad - num_sink_pads;
}
/* Properties of a supported media bus format. */
struct ub960_format_info {
	u32 code;	/* MEDIA_BUS_FMT_* code */
	u32 bpp;	/* Bits per pixel */
	u8 datatype;	/* CSI-2 data type (MIPI_CSI2_DT_*) */
	bool meta;	/* Metadata format (unused by the visible code) */
};
/* Table of supported formats, looked up by code in ub960_find_format(). */
static const struct ub960_format_info ub960_formats[] = {
	{ .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .datatype = MIPI_CSI2_DT_RGB888, },
	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
	{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
	{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
	{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
	{ .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
};
/*
 * Look up the format info for a media bus code.
 *
 * Returns NULL if the code is not in ub960_formats[].
 */
static const struct ub960_format_info *ub960_find_format(u32 code)
{
	const struct ub960_format_info *fmt = ub960_formats;
	const struct ub960_format_info *end = fmt + ARRAY_SIZE(ub960_formats);

	for (; fmt < end; fmt++) {
		if (fmt->code == code)
			return fmt;
	}

	return NULL;
}
/* Iterator state for the for_each_*rxport() macros. */
struct ub960_rxport_iter {
	unsigned int nport;		/* Current RX port number */
	struct ub960_rxport *rxport;	/* Current port, NULL past the end */
};
/* Filter flags for ub960_iter_rxport(). */
enum ub960_iter_flags {
	UB960_ITER_ACTIVE_ONLY = BIT(0),	/* Skip unconnected ports */
	UB960_ITER_FPD4_ONLY = BIT(1),		/* Skip non-FPD-Link-IV ports */
};
/*
 * Advance the RX port iterator to the next port matching @flags, starting
 * from it.nport. Returns the iterator positioned on the match, or with
 * nport == num_rxports and rxport == NULL when iteration is done.
 *
 * Fix: guard against a NULL rxport in the FPD4 check as well, so that
 * UB960_ITER_FPD4_ONLY used without UB960_ITER_ACTIVE_ONLY cannot
 * dereference an unpopulated port slot.
 */
static struct ub960_rxport_iter ub960_iter_rxport(struct ub960_data *priv,
						  struct ub960_rxport_iter it,
						  enum ub960_iter_flags flags)
{
	for (; it.nport < priv->hw_data->num_rxports; it.nport++) {
		it.rxport = priv->rxports[it.nport];

		if ((flags & UB960_ITER_ACTIVE_ONLY) && !it.rxport)
			continue;

		if ((flags & UB960_ITER_FPD4_ONLY) &&
		    (!it.rxport || it.rxport->cdr_mode != RXPORT_CDR_FPD4))
			continue;

		return it;
	}

	it.rxport = NULL;

	return it;
}
#define for_each_rxport(priv, it) \
for (struct ub960_rxport_iter it = \
ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
0); \
it.nport < (priv)->hw_data->num_rxports; \
it.nport++, it = ub960_iter_rxport(priv, it, 0))
#define for_each_active_rxport(priv, it) \
for (struct ub960_rxport_iter it = \
ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
UB960_ITER_ACTIVE_ONLY); \
it.nport < (priv)->hw_data->num_rxports; \
it.nport++, it = ub960_iter_rxport(priv, it, \
UB960_ITER_ACTIVE_ONLY))
#define for_each_active_rxport_fpd4(priv, it) \
for (struct ub960_rxport_iter it = \
ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
UB960_ITER_ACTIVE_ONLY | \
UB960_ITER_FPD4_ONLY); \
it.nport < (priv)->hw_data->num_rxports; \
it.nport++, it = ub960_iter_rxport(priv, it, \
UB960_ITER_ACTIVE_ONLY | \
UB960_ITER_FPD4_ONLY))
/* -----------------------------------------------------------------------------
* Basic device access
*/
/*
 * Read a shared (non-paged) register.
 *
 * Chained-error convention: if err is non-NULL and *err is already set, the
 * access is skipped and the earlier error is returned; on failure the error
 * code is also stored to *err. This lets callers batch several accesses and
 * check for errors once at the end.
 */
static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = regmap_read(priv->regmap, reg, &v);
	if (!ret)
		*val = v;
	else
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);

	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Write a shared (non-paged) register. Follows the chained-error convention
 * (see ub960_read()): skipped when *err is already set, and any failure is
 * propagated to *err.
 */
static int ub960_write(struct ub960_data *priv, u8 reg, u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	scoped_guard(mutex, &priv->reg_lock) {
		ret = regmap_write(priv->regmap, reg, val);
		if (ret)
			dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
				__func__, reg, ret);
	}

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read-modify-write a shared (non-paged) register under reg_lock. Follows
 * the chained-error convention (see ub960_read()).
 */
static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val,
			     int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = regmap_update_bits(priv->regmap, reg, mask, val);
	if (ret)
		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
			__func__, reg, ret);

	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read a 16-bit big-endian value from two consecutive shared registers.
 * Follows the chained-error convention (see ub960_read()).
 */
static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	__be16 __v;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = be16_to_cpu(__v);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Select which RX port the paged UB960_RR_* registers refer to, by writing
 * FPD3_PORT_SEL with (nport << 4) | BIT(nport). The selection is cached in
 * priv->reg_current.rxport to avoid redundant writes.
 *
 * Caller must hold priv->reg_lock.
 */
static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.rxport == nport)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
			   (nport << 4) | BIT(nport));
	if (ret) {
		dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__,
			nport, ret);
		return ret;
	}

	priv->reg_current.rxport = nport;

	return 0;
}
/*
 * Read a per-RX-port paged (UB960_RR_*) register. Selects the port and
 * performs the read atomically under reg_lock. Follows the chained-error
 * convention (see ub960_read()).
 */
static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg,
			     u8 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_rxport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_read(priv->regmap, reg, &v);
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = v;

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Write a per-RX-port paged (UB960_RR_*) register. Selects the port and
 * performs the write atomically under reg_lock. Follows the chained-error
 * convention (see ub960_read()).
 */
static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg,
			      u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_rxport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_write(priv->regmap, reg, val);
	if (ret)
		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read-modify-write a per-RX-port paged (UB960_RR_*) register under
 * reg_lock. Follows the chained-error convention (see ub960_read()).
 */
static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
				    u8 mask, u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_rxport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_update_bits(priv->regmap, reg, mask, val);
	if (ret)
		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read a 16-bit big-endian value from two consecutive per-RX-port paged
 * registers. Follows the chained-error convention (see ub960_read()).
 */
static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
			       u16 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	__be16 __v;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_rxport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = be16_to_cpu(__v);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Select which TX port the paged UB960_TR_* registers refer to, by writing
 * CSI_PORT_SEL with (nport << 4) | BIT(nport). The selection is cached in
 * priv->reg_current.txport to avoid redundant writes.
 *
 * Caller must hold priv->reg_lock.
 */
static int ub960_txport_select(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.txport == nport)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL,
			   (nport << 4) | BIT(nport));
	if (ret) {
		dev_err(dev, "%s: cannot select tx port %d (%d)!\n", __func__,
			nport, ret);
		return ret;
	}

	priv->reg_current.txport = nport;

	return 0;
}
/*
 * Read a per-TX-port paged (UB960_TR_*) register. Selects the port and
 * performs the read atomically under reg_lock. Follows the chained-error
 * convention (see ub960_read()).
 */
static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg,
			     u8 *val, int *err)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_read(priv->regmap, reg, &v);
	if (ret) {
		dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
			__func__, reg, ret);
		goto out_unlock;
	}

	*val = v;

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Write a per-TX-port paged (UB960_TR_*) register. Selects the port and
 * performs the write atomically under reg_lock. Follows the chained-error
 * convention (see ub960_read()).
 */
static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg,
			      u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_write(priv->regmap, reg, val);
	if (ret)
		dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read-modify-write a per-TX-port paged (UB960_TR_*) register under
 * reg_lock. Follows the chained-error convention (see ub960_read()).
 */
static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
				    u8 mask, u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_txport_select(priv, nport);
	if (ret)
		goto out_unlock;

	ret = regmap_update_bits(priv->regmap, reg, mask, val);
	if (ret)
		dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
			__func__, reg, ret);

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Select the target block for indirect register accesses done through
 * IND_ACC_ADDR/IND_ACC_DATA. The block id is written to IND_ACC_CTL shifted
 * past the auto-increment bit. Cached in priv->reg_current.indirect_target.
 *
 * Caller must hold priv->reg_lock.
 */
static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
{
	struct device *dev = &priv->client->dev;
	int ret;

	lockdep_assert_held(&priv->reg_lock);

	if (priv->reg_current.indirect_target == block)
		return 0;

	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2);
	if (ret) {
		dev_err(dev, "%s: cannot select indirect target %u (%d)!\n",
			__func__, block, ret);
		return ret;
	}

	priv->reg_current.indirect_target = block;

	return 0;
}
/*
 * Read a register from an indirect register block: select the block
 * (IND_ACC_CTL), write the register offset to IND_ACC_ADDR, then read the
 * value through IND_ACC_DATA. All three steps are done under reg_lock.
 * Follows the chained-error convention (see ub960_read()).
 *
 * Fixes: the error messages used the malformed format "%u:%x02x" (printing
 * a literal "02x" after the hex value) instead of "%u:0x%02x", and the
 * IND_ACC_DATA message claimed a write although the operation is a read.
 */
static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val,
			  int *err)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_select_ind_reg_block(priv, block);
	if (ret)
		goto out_unlock;

	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
	if (ret) {
		dev_err(dev,
			"Write to IND_ACC_ADDR failed when reading %u:0x%02x: %d\n",
			block, reg, ret);
		goto out_unlock;
	}

	ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v);
	if (ret) {
		dev_err(dev,
			"Read from IND_ACC_DATA failed when reading %u:0x%02x: %d\n",
			block, reg, ret);
		goto out_unlock;
	}

	*val = v;

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Write a register in an indirect register block: select the block
 * (IND_ACC_CTL), write the register offset to IND_ACC_ADDR, then write the
 * value through IND_ACC_DATA. All steps are done under reg_lock. Follows
 * the chained-error convention (see ub960_read()).
 *
 * Fixes: the error messages used the malformed format "%u:%x02x" (printing
 * a literal "02x" after the hex value) instead of "%u:0x%02x".
 */
static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val,
			   int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_select_ind_reg_block(priv, block);
	if (ret)
		goto out_unlock;

	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
	if (ret) {
		dev_err(dev,
			"Write to IND_ACC_ADDR failed when writing %u:0x%02x: %d\n",
			block, reg, ret);
		goto out_unlock;
	}

	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val);
	if (ret) {
		dev_err(dev,
			"Write to IND_ACC_DATA failed when writing %u:0x%02x: %d\n",
			block, reg, ret);
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read-modify-write a register in an indirect register block through
 * IND_ACC_ADDR/IND_ACC_DATA, under reg_lock. Follows the chained-error
 * convention (see ub960_read()).
 *
 * Note: the update relies on the IND_ACC_ADDR offset staying valid for both
 * the read and the write halves of regmap_update_bits().
 *
 * Fixes: the error messages used the malformed format "%u:%x02x" (printing
 * a literal "02x" after the hex value) instead of "%u:0x%02x".
 */
static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
				 u8 mask, u8 val, int *err)
{
	struct device *dev = &priv->client->dev;
	int ret;

	if (err && *err)
		return *err;

	mutex_lock(&priv->reg_lock);

	ret = ub960_select_ind_reg_block(priv, block);
	if (ret)
		goto out_unlock;

	ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
	if (ret) {
		dev_err(dev,
			"Write to IND_ACC_ADDR failed when updating %u:0x%02x: %d\n",
			block, reg, ret);
		goto out_unlock;
	}

	ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask,
				 val);
	if (ret) {
		dev_err(dev,
			"Write to IND_ACC_DATA failed when updating %u:0x%02x: %d\n",
			block, reg, ret);
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&priv->reg_lock);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Soft-reset the deserializer and wait for the reset bit to self-clear.
 *
 * @reset_regs selects DIGITAL_RESET1 (which, per the flag name, also resets
 * the registers) over DIGITAL_RESET0. Polls up to 100 ms for completion.
 */
static int ub960_reset(struct ub960_data *priv, bool reset_regs)
{
	struct device *dev = &priv->client->dev;
	unsigned int v;
	int ret;
	u8 bit;

	bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
			   UB960_SR_RESET_DIGITAL_RESET0;

	ret = ub960_write(priv, UB960_SR_RESET, bit, NULL);
	if (ret)
		return ret;

	mutex_lock(&priv->reg_lock);

	ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
				       (v & bit) == 0, 2000, 100000);

	mutex_unlock(&priv->reg_lock);

	if (ret)
		dev_err(dev, "reset failed: %d\n", ret);

	return ret;
}
/* -----------------------------------------------------------------------------
* I2C-ATR (address translator)
*/
/*
 * i2c-atr callback: map a remote client address to an alias on RX port
 * @chan_id. Finds a free slot in the port's alias table (0 marks a free
 * entry) and programs the SLAVE_ID/SLAVE_ALIAS register pair; the 7-bit
 * addresses are written shifted left by one, in the 8-bit wire format.
 *
 * Returns -EADDRNOTAVAIL when all UB960_MAX_PORT_ALIASES slots are in use.
 */
static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
				 u16 addr, u16 alias)
{
	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
	struct ub960_rxport *rxport = priv->rxports[chan_id];
	struct device *dev = &priv->client->dev;
	unsigned int reg_idx;
	int ret = 0;

	guard(mutex)(&rxport->aliased_addrs_lock);

	/* Find the first unused alias slot */
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
		if (!rxport->aliased_addrs[reg_idx])
			break;
	}

	if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
		dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
		return -EADDRNOTAVAIL;
	}

	rxport->aliased_addrs[reg_idx] = addr;

	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
			   addr << 1, &ret);
	ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
			   alias << 1, &ret);

	if (ret)
		return ret;

	dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
		rxport->nport, addr, alias, reg_idx);

	return 0;
}
/*
 * i2c-atr callback: undo ub960_atr_attach_addr() for @addr on RX port
 * @chan_id.
 *
 * Looks up the slot bookkept in rxport->aliased_addrs, frees it and clears
 * the hardware SLAVE_ALIAS register for that slot. Void return: failures
 * can only be logged here.
 */
static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id,
				  u16 addr)
{
	struct ub960_data *priv = i2c_atr_get_driver_data(atr);
	struct ub960_rxport *rxport = priv->rxports[chan_id];
	struct device *dev = &priv->client->dev;
	unsigned int reg_idx;
	int ret;

	guard(mutex)(&rxport->aliased_addrs_lock);

	/* Find the slot this client address was attached to */
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
		if (rxport->aliased_addrs[reg_idx] == addr)
			break;
	}

	if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
		dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
			rxport->nport, addr);
		return;
	}

	rxport->aliased_addrs[reg_idx] = 0;

	/* Clearing the alias register disables translation for this slot */
	ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
				 0, NULL);
	if (ret) {
		dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n",
			rxport->nport, addr, ret);
		return;
	}

	dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
		addr, reg_idx);
}
/* i2c-atr callbacks mapping remote clients reachable over the FPD-Link */
static const struct i2c_atr_ops ub960_atr_ops = {
	.attach_addr = ub960_atr_attach_addr,
	.detach_addr = ub960_atr_detach_addr,
};
/*
 * Create the i2c address translator, one channel per RX port, attached to
 * the same adapter the deserializer itself sits on.
 */
static int ub960_init_atr(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	struct i2c_adapter *parent_adap = priv->client->adapter;

	priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
				priv->hw_data->num_rxports, 0);
	if (IS_ERR(priv->atr))
		return PTR_ERR(priv->atr);

	i2c_atr_set_driver_data(priv->atr, priv);

	return 0;
}
/* Tear down the address translator created by ub960_init_atr() */
static void ub960_uninit_atr(struct ub960_data *priv)
{
	i2c_atr_delete(priv->atr);
	priv->atr = NULL;
}
/* -----------------------------------------------------------------------------
* TX ports
*/
/*
 * Parse one CSI-2 TX port endpoint from the firmware node.
 *
 * Allocates the ub960_txport, extracts clock mode and lane count from the
 * D-PHY endpoint, and validates the single mandatory link frequency. The
 * CSI data rate is twice the link frequency (DDR) and must be one of
 * 400/800/1200/1600 Mbps. On success the port is stored in
 * priv->txports[nport] (freed elsewhere); on failure everything allocated
 * here is released.
 */
static int ub960_parse_dt_txport(struct ub960_data *priv,
				 struct fwnode_handle *ep_fwnode,
				 u8 nport)
{
	struct device *dev = &priv->client->dev;
	struct v4l2_fwnode_endpoint vep = {};
	struct ub960_txport *txport;
	int ret;

	txport = kzalloc(sizeof(*txport), GFP_KERNEL);
	if (!txport)
		return -ENOMEM;

	txport->priv = priv;
	txport->nport = nport;

	vep.bus_type = V4L2_MBUS_CSI2_DPHY;
	ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &vep);
	if (ret) {
		dev_err(dev, "tx%u: failed to parse endpoint data\n", nport);
		goto err_free_txport;
	}

	/* Note: field name carries a historical typo ("continous") */
	txport->non_continous_clk = vep.bus.mipi_csi2.flags &
				    V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;

	txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;

	if (vep.nr_of_link_frequencies != 1) {
		ret = -EINVAL;
		goto err_free_vep;
	}

	priv->tx_link_freq[0] = vep.link_frequencies[0];
	/* DDR signalling: data rate is twice the link frequency */
	priv->tx_data_rate = priv->tx_link_freq[0] * 2;

	if (priv->tx_data_rate != MHZ(1600) &&
	    priv->tx_data_rate != MHZ(1200) &&
	    priv->tx_data_rate != MHZ(800) &&
	    priv->tx_data_rate != MHZ(400)) {
		dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport);
		ret = -EINVAL;
		goto err_free_vep;
	}

	v4l2_fwnode_endpoint_free(&vep);

	priv->txports[nport] = txport;

	return 0;

err_free_vep:
	v4l2_fwnode_endpoint_free(&vep);
err_free_txport:
	kfree(txport);

	return ret;
}
/*
 * Read the CSI TX interrupt status register for TX port @nport and log the
 * reported error events. Presumably the ISR register clears on read --
 * TODO confirm against the datasheet.
 */
static int ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
{
	struct device *dev = &priv->client->dev;
	u8 csi_tx_isr;
	int ret;

	ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr,
				NULL);
	if (ret)
		return ret;

	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
		dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);

	if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
		dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);

	return 0;
}
/* -----------------------------------------------------------------------------
* RX ports
*/
/*
 * Enable the VPOC (power-over-coax) regulator on every active RX port
 * that has one. On failure, unwinds by disabling the regulators that were
 * enabled so far (all ports below the failing one).
 */
static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
{
	unsigned int failed_nport;
	int ret;

	for_each_active_rxport(priv, it) {
		if (!it.rxport->vpoc)
			continue;

		ret = regulator_enable(it.rxport->vpoc);
		if (ret) {
			failed_nport = it.nport;
			goto err_disable_vpocs;
		}
	}

	return 0;

err_disable_vpocs:
	/* Disable VPOC on ports failed_nport - 1 down to 0 */
	while (failed_nport--) {
		struct ub960_rxport *rxport = priv->rxports[failed_nport];

		if (!rxport || !rxport->vpoc)
			continue;

		regulator_disable(rxport->vpoc);
	}

	return ret;
}
/* Disable the VPOC regulator on every active RX port that has one */
static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
{
	for_each_active_rxport(priv, it) {
		if (!it.rxport->vpoc)
			continue;

		regulator_disable(it.rxport->vpoc);
	}
}
/*
 * Clear the latched error/status registers of one RX port by reading
 * them. The read values are intentionally discarded -- the registers are
 * presumably clear-on-read (TODO confirm against the datasheet). Uses the
 * accumulated-error pattern: the first failing read sets @ret and skips
 * the rest.
 */
static int ub960_rxport_clear_errors(struct ub960_data *priv,
				     unsigned int nport)
{
	int ret = 0;
	u8 v;

	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v, &ret);

	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v, &ret);
	ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v, &ret);

	ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v, &ret);

	return ret;
}
/* Clear latched error status on all RX ports, active or not */
static int ub960_clear_rx_errors(struct ub960_data *priv)
{
	int ret;

	for_each_rxport(priv, it) {
		ret = ub960_rxport_clear_errors(priv, it.nport);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Read back the current strobe position of RX port @nport.
 *
 * The position is the signed difference between the data delay and the
 * clock delay. Each delay is the SFILTER status delay field plus
 * UB960_MANUAL_STROBE_EXTRA_DELAY when the corresponding NO_EXTRA_DELAY
 * bit in the indirect analog registers is clear.
 */
static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
				       unsigned int nport, s8 *strobe_pos)
{
	u8 v;
	u8 clk_delay, data_delay;
	int ret;

	ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			     UB960_IR_RX_ANA_STROBE_SET_CLK, &v, NULL);
	if (ret)
		return ret;

	clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
			0 : UB960_MANUAL_STROBE_EXTRA_DELAY;

	ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			     UB960_IR_RX_ANA_STROBE_SET_DATA, &v, NULL);
	if (ret)
		return ret;

	data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
			0 : UB960_MANUAL_STROBE_EXTRA_DELAY;

	/* Add the delay fields reported by the strobe filter status regs */
	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v, NULL);
	if (ret)
		return ret;

	clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;

	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v, NULL);
	if (ret)
		return ret;

	data_delay += v & UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK;

	*strobe_pos = data_delay - clk_delay;

	return 0;
}
/*
 * Program a manual strobe position for RX port @nport.
 *
 * A negative position delays the clock, a positive one delays the data.
 * Within the normal AEQ range the delay value keeps the NO_EXTRA_DELAY
 * flag set; outside that range the flag is dropped, which adds
 * UB960_MANUAL_STROBE_EXTRA_DELAY, so the written value is reduced by
 * that amount. Uses the accumulated-error pattern for the two writes.
 */
static int ub960_rxport_set_strobe_pos(struct ub960_data *priv,
				       unsigned int nport, s8 strobe_pos)
{
	u8 clk_delay, data_delay;
	int ret = 0;

	clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
	data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

	if (strobe_pos < UB960_MIN_AEQ_STROBE_POS)
		clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY;
	else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS)
		data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY;
	else if (strobe_pos < 0)
		clk_delay = abs(strobe_pos) | UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
	else if (strobe_pos > 0)
		data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;

	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay, &ret);

	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay, &ret);

	return ret;
}
/*
 * Program the global (all RX ports) AEQ strobe search range into the
 * shared SFILTER_CFG register.
 */
static int ub960_rxport_set_strobe_range(struct ub960_data *priv, s8 strobe_min,
					 s8 strobe_max)
{
	/* Convert the signed strobe pos to positive zero based value */
	strobe_min -= UB960_MIN_AEQ_STROBE_POS;
	strobe_max -= UB960_MIN_AEQ_STROBE_POS;

	return ub960_write(priv, UB960_XR_SFILTER_CFG,
			   ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
			   ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT),
			   NULL);
}
/*
 * Read the current AEQ level of RX port @nport: the sum of the two masked
 * AEQ stage status fields from the AEQ_STATUS register.
 */
static int ub960_rxport_get_eq_level(struct ub960_data *priv,
				     unsigned int nport, u8 *eq_level)
{
	int ret;
	u8 v;

	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v, NULL);
	if (ret)
		return ret;

	*eq_level = (v & UB960_RR_AEQ_STATUS_STATUS_1) +
		    (v & UB960_RR_AEQ_STATUS_STATUS_2);

	return 0;
}
/*
 * Force a fixed EQ level on RX port @nport (AEQ bypass).
 *
 * The requested level is split across the two EQ stages: stage 1 takes up
 * to 7 steps, the remainder goes to stage 2. The AEQ_BYPASS register is
 * read-modified-written with both stage values and the bypass enable bit.
 */
static int ub960_rxport_set_eq_level(struct ub960_data *priv,
				     unsigned int nport, u8 eq_level)
{
	const unsigned int eq_stage_max = 7;
	u8 stage1, stage2;
	int ret;
	u8 v;

	/* Fill stage 1 first, overflow into stage 2 */
	if (eq_level > eq_stage_max) {
		stage1 = eq_stage_max;
		stage2 = eq_level - eq_stage_max;
	} else {
		stage1 = eq_level;
		stage2 = 0;
	}

	ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
	if (ret)
		return ret;

	v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
	       UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
	v |= stage1 << UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT;
	v |= stage2 << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
	v |= UB960_RR_AEQ_BYPASS_ENABLE;

	return ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v, NULL);
}
/*
 * Program the AEQ adaptation range (floor and ceiling) for RX port
 * @nport and enable use of the floor setting. Uses the accumulated-error
 * pattern for the two register accesses.
 */
static int ub960_rxport_set_eq_range(struct ub960_data *priv,
				     unsigned int nport, u8 eq_min, u8 eq_max)
{
	int ret = 0;

	ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
			   (eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
			   (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT),
			   &ret);

	/* Enable AEQ min setting */
	ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
				 UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR, &ret);

	return ret;
}
/*
 * Configure equalizer and strobe settings for RX port @nport, honoring the
 * driver-wide manual-strobe setting (priv->strobe) and the port's
 * manual/adaptive EQ configuration (rxport->eq).
 */
static int ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
{
	struct ub960_rxport *rxport = priv->rxports[nport];
	int ret;

	/* We also set common settings here. Should be moved elsewhere. */

	if (priv->strobe.manual) {
		/* Disable AEQ_SFILTER_EN */
		ret = ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
					UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0,
					NULL);
		if (ret)
			return ret;
	} else {
		/* Enable SFILTER and error control */
		ret = ub960_write(priv, UB960_XR_AEQ_CTL1,
				  UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
				  UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN,
				  NULL);
		if (ret)
			return ret;

		/* Set AEQ strobe range */
		ret = ub960_rxport_set_strobe_range(priv, priv->strobe.min,
						    priv->strobe.max);
		if (ret)
			return ret;
	}

	/* The rest are port specific */

	/* Manual mode uses the parsed per-port position, auto uses 0 */
	if (priv->strobe.manual)
		ret = ub960_rxport_set_strobe_pos(priv, nport,
						  rxport->eq.strobe_pos);
	else
		ret = ub960_rxport_set_strobe_pos(priv, nport, 0);

	if (ret)
		return ret;

	if (rxport->eq.manual_eq) {
		ret = ub960_rxport_set_eq_level(priv, nport,
						rxport->eq.manual.eq_level);
		if (ret)
			return ret;

		/* Enable AEQ Bypass */
		ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
					       UB960_RR_AEQ_BYPASS_ENABLE,
					       UB960_RR_AEQ_BYPASS_ENABLE,
					       NULL);
		if (ret)
			return ret;
	} else {
		ret = ub960_rxport_set_eq_range(priv, nport,
						rxport->eq.aeq.eq_level_min,
						rxport->eq.aeq.eq_level_max);
		if (ret)
			return ret;

		/* Disable AEQ Bypass */
		ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
					       UB960_RR_AEQ_BYPASS_ENABLE, 0,
					       NULL);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Check whether RX port @nport is locked and error free.
 *
 * Sets *@ok to false immediately if LOCK_STS is not set; otherwise reads
 * the remaining status/error registers (which also clears latched errors,
 * presumably -- see ub960_rxport_clear_errors()) and reports the link ok
 * only if none of the error bits or counters is non-zero.
 */
static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
				bool *ok)
{
	u8 rx_port_sts1, rx_port_sts2;
	u16 parity_errors;
	u8 csi_rx_sts;
	u8 csi_err_cnt;
	u8 bcc_sts;
	int ret;
	bool errors;

	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
				&rx_port_sts1, NULL);
	if (ret)
		return ret;

	if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) {
		*ok = false;
		return 0;
	}

	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
				&rx_port_sts2, NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts,
				NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
				&csi_err_cnt, NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts,
				NULL);
	if (ret)
		return ret;

	ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
				  &parity_errors, NULL);
	if (ret)
		return ret;

	errors = (rx_port_sts1 & UB960_RR_RX_PORT_STS1_ERROR_MASK) ||
		 (rx_port_sts2 & UB960_RR_RX_PORT_STS2_ERROR_MASK) ||
		 (bcc_sts & UB960_RR_BCC_STATUS_ERROR_MASK) ||
		 (csi_rx_sts & UB960_RR_CSI_RX_STS_ERROR_MASK) || csi_err_cnt ||
		 parity_errors;

	*ok = !errors;

	return 0;
}
/*
 * UB9702 workaround: toggle the PI_MODE field (bits 4:3 of CHANNEL_MODE)
 * to 2 and back to 0 to recover from a possible FPD RX lockup.
 */
static int ub960_rxport_lockup_wa_ub9702(struct ub960_data *priv)
{
	int ret;

	/* Toggle PI_MODE to avoid possible FPD RX lockup */

	ret = ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
				2 << 3, NULL);
	if (ret)
		return ret;

	usleep_range(1000, 5000);

	return ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
				 0, NULL);
}
/*
* Wait for the RX ports to lock, have no errors and have stable strobe position
* and EQ level.
*/
/*
 * Poll the ports in @port_mask for up to one second until each has been
 * link-ok for two consecutive iterations. If @lock_mask is non-NULL it
 * receives the final per-port lock state; the function itself does not
 * fail when some ports stay unlocked -- callers check the mask.
 */
static int ub960_rxport_wait_locks(struct ub960_data *priv,
				   unsigned long port_mask,
				   unsigned int *lock_mask)
{
	struct device *dev = &priv->client->dev;
	unsigned long timeout;
	unsigned int link_ok_mask;
	unsigned int missing;
	unsigned int loops;
	u8 nport;
	int ret;

	if (port_mask == 0) {
		if (lock_mask)
			*lock_mask = 0;
		return 0;
	}

	if (port_mask >= BIT(priv->hw_data->num_rxports))
		return -EINVAL;

	timeout = jiffies + msecs_to_jiffies(1000);
	loops = 0;
	link_ok_mask = 0;

	while (time_before(jiffies, timeout)) {
		bool fpd4_wa = false;

		missing = 0;

		for_each_set_bit(nport, &port_mask,
				 priv->hw_data->num_rxports) {
			struct ub960_rxport *rxport = priv->rxports[nport];
			bool ok;

			if (!rxport)
				continue;

			ret = ub960_rxport_link_ok(priv, nport, &ok);
			if (ret)
				return ret;

			/* Any unlocked FPD4 port triggers the lockup WA */
			if (!ok && rxport->cdr_mode == RXPORT_CDR_FPD4)
				fpd4_wa = true;

			/*
			 * We want the link to be ok for two consecutive loops,
			 * as a link could get established just before our test
			 * and drop soon after.
			 */
			if (!ok || !(link_ok_mask & BIT(nport)))
				missing++;

			if (ok)
				link_ok_mask |= BIT(nport);
			else
				link_ok_mask &= ~BIT(nport);
		}

		loops++;

		if (missing == 0)
			break;

		if (fpd4_wa) {
			ret = ub960_rxport_lockup_wa_ub9702(priv);
			if (ret)
				return ret;
		}

		/*
		 * The sleep time of 10 ms was found by testing to give a lock
		 * with a few iterations. It can be decreased if on some setups
		 * the lock can be achieved much faster.
		 */
		fsleep(10 * USEC_PER_MSEC);
	}

	if (lock_mask)
		*lock_mask = link_ok_mask;

	dev_dbg(dev, "Wait locks done in %u loops\n", loops);

	/* Debug dump: lock state, RX frequency, and (UB960 only) strobe/EQ */
	for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) {
		struct ub960_rxport *rxport = priv->rxports[nport];
		s8 strobe_pos, eq_level;
		u16 v;

		if (!rxport)
			continue;

		if (!(link_ok_mask & BIT(nport))) {
			dev_dbg(dev, "\trx%u: not locked\n", nport);
			continue;
		}

		/* The freq registers appear to be in 1/256 MHz units -- TODO confirm */
		ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
					  &v, NULL);
		if (ret)
			return ret;

		if (priv->hw_data->is_ub9702) {
			dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
				nport, ((u64)v * HZ_PER_MHZ) >> 8);
		} else {
			ret = ub960_rxport_get_strobe_pos(priv, nport,
							  &strobe_pos);
			if (ret)
				return ret;

			ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
			if (ret)
				return ret;

			dev_dbg(dev,
				"\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
				nport, strobe_pos, eq_level,
				((u64)v * HZ_PER_MHZ) >> 8);
		}
	}

	return 0;
}
/*
 * Back-channel clock rate for UB960: the reference clock scaled by a
 * ratio that depends on the FPD-Link RX mode. Returns 0 for unknown
 * modes (without touching the refclk).
 */
static unsigned long ub960_calc_bc_clk_rate_ub960(struct ub960_data *priv,
						  struct ub960_rxport *rxport)
{
	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		/* refclk / 10 */
		return clk_get_rate(priv->refclk) * 1 / 10;

	case RXPORT_MODE_CSI2_SYNC:
		/* refclk * 2 */
		return clk_get_rate(priv->refclk) * 2 / 1;

	case RXPORT_MODE_CSI2_NONSYNC:
		/* refclk * 2 / 5 */
		return clk_get_rate(priv->refclk) * 2 / 5;

	default:
		return 0;
	}
}
/*
 * Back-channel clock rate for UB9702: fixed rates per FPD-Link RX mode,
 * 0 for unknown modes.
 */
static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
						   struct ub960_rxport *rxport)
{
	if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC)
		return 47187500;

	if (rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC)
		return 9437500;

	if (rxport->rx_mode == RXPORT_MODE_RAW10 ||
	    rxport->rx_mode == RXPORT_MODE_RAW12_HF ||
	    rxport->rx_mode == RXPORT_MODE_RAW12_LF)
		return 2359400;

	return 0;
}
/*
 * Write one byte register on the remote serializer via a raw SMBus
 * transfer to its alias address (used before the serializer has a proper
 * i2c client, e.g. during back-channel setup). Follows the
 * accumulated-error pattern via @err.
 */
static int ub960_rxport_serializer_write(struct ub960_rxport *rxport, u8 reg,
					 u8 val, int *err)
{
	struct ub960_data *priv = rxport->priv;
	struct device *dev = &priv->client->dev;
	union i2c_smbus_data data;
	int ret;

	if (err && *err)
		return *err;

	data.byte = val;

	ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias, 0,
			     I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &data);
	if (ret)
		dev_err(dev,
			"rx%u: cannot write serializer register 0x%02x (%d)!\n",
			rxport->nport, reg, ret);

	if (ret && err)
		*err = ret;

	return ret;
}
/*
 * Read one byte register from the remote serializer via a raw SMBus
 * transfer to its alias address. *@val is only written on success.
 * Follows the accumulated-error pattern via @err.
 */
static int ub960_rxport_serializer_read(struct ub960_rxport *rxport, u8 reg,
					u8 *val, int *err)
{
	struct ub960_data *priv = rxport->priv;
	struct device *dev = &priv->client->dev;
	union i2c_smbus_data data = { 0 };
	int ret;

	if (err && *err)
		return *err;

	ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias,
			     priv->client->flags, I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &data);
	if (ret)
		dev_err(dev,
			"rx%u: cannot read serializer register 0x%02x (%d)!\n",
			rxport->nport, reg, ret);
	else
		*val = data.byte;

	if (ret && err)
		*err = ret;

	return ret;
}
static int ub960_serializer_temp_ramp(struct ub960_rxport *rxport)
{
struct ub960_data *priv = rxport->priv;
short temp_dynamic_offset[] = {-1, -1, 0, 0, 1, 1, 1, 3};
u8 temp_dynamic_cfg;
u8 nport = rxport->nport;
u8 ser_temp_code;
int ret = 0;
/* Configure temp ramp only on UB953 */
if (!fwnode_device_is_compatible(rxport->ser.fwnode, "ti,ds90ub953-q1" ))
return 0;
/* Read current serializer die temperature */
ub960_rxport_read(priv, nport, UB960_RR_SENSOR_STS_2, &ser_temp_code,
&ret);
/* Enable I2C passthrough on back channel */
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
if (ret)
return ret;
/* Select indirect page for analog regs on the serializer */
ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_CTL,
UB953_IND_TARGET_ANALOG << 2, &ret);
/* Set temperature ramp dynamic and static config */
ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
ub960_rxport_serializer_read(rxport, UB953_REG_IND_ACC_DATA,
&temp_dynamic_cfg, &ret);
if (ret)
return ret;
temp_dynamic_cfg |= UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV;
temp_dynamic_cfg += temp_dynamic_offset[ser_temp_code];
/* Update temp static config */
ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
UB953_IND_ANA_TEMP_STATIC_CFG, &ret);
ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
UB953_IND_ANA_TEMP_STATIC_CFG_MASK, &ret);
/* Update temperature ramp dynamic config */
ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
/* Enable I2C auto ack on BC before we set dynamic cfg and reset */
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
UB960_RR_BCC_CONFIG_AUTO_ACK_ALL, &ret);
ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
temp_dynamic_cfg, &ret);
if (ret)
return ret;
/* Soft reset to apply PLL updates */
ub960_rxport_serializer_write(rxport, UB953_REG_RESET_CTL,
UB953_REG_RESET_CTL_DIGITAL_RESET_0,
&ret);
msleep(20);
/* Disable I2C passthrough and auto-ack on BC */
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
0x0, &ret);
return ret;
}
/*
 * Back-channel related serializer configuration done over temporary I2C
 * passthrough: disable BC alternate-mode auto detect (UB971 register --
 * presumably harmless/ignored on other serializers, TODO confirm) and
 * shorten the link detect timer. Skipped when the serializer address is
 * unknown.
 */
static int ub960_rxport_bc_ser_config(struct ub960_rxport *rxport)
{
	struct ub960_data *priv = rxport->priv;
	struct device *dev = &priv->client->dev;
	u8 nport = rxport->nport;
	int ret = 0;

	/* Skip port if serializer's address is not known */
	if (rxport->ser.addr < 0) {
		dev_dbg(dev,
			"rx%u: serializer address missing, skip configuration\n",
			nport);
		return 0;
	}

	/*
	 * Note: the code here probably only works for CSI-2 serializers in
	 * sync mode. To support other serializers the BC related configuration
	 * should be done before calling this function.
	 */

	/* Enable I2C passthrough and auto-ack on BC */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
				 &ret);

	if (ret)
		return ret;

	/* Disable BC alternate mode auto detect */
	ub960_rxport_serializer_write(rxport, UB971_ENH_BC_CHK, 0x02, &ret);

	/* Decrease link detect timer */
	ub960_rxport_serializer_write(rxport, UB953_REG_BC_CTRL, 0x06, &ret);

	/* Disable I2C passthrough and auto-ack on BC */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
				 UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
				 0x0, &ret);

	return ret;
}
/*
 * Register an i2c client device for the remote serializer of RX port
 * @nport at its alias address, passing the port number, ATR handle and
 * the hardware-dependent back-channel rate as platform data.
 */
static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
{
	struct ub960_rxport *rxport = priv->rxports[nport];
	struct device *dev = &priv->client->dev;
	struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
	struct i2c_board_info ser_info = {
		.fwnode = rxport->ser.fwnode,
		.platform_data = ser_pdata,
	};

	ser_pdata->port = nport;
	ser_pdata->atr = priv->atr;
	if (priv->hw_data->is_ub9702)
		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport);
	else
		ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport);

	/*
	 * The serializer is added under the same i2c adapter as the
	 * deserializer. This is not quite right, as the serializer is behind
	 * the FPD-Link.
	 */
	ser_info.addr = rxport->ser.alias;
	rxport->ser.client =
		i2c_new_client_device(priv->client->adapter, &ser_info);
	if (IS_ERR(rxport->ser.client)) {
		/* Log message gains the conventional trailing newline */
		dev_err(dev, "rx%u: cannot add %s i2c device\n", nport,
			ser_info.type);
		return PTR_ERR(rxport->ser.client);
	}

	dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n",
		nport, rxport->ser.client->addr,
		rxport->ser.client->adapter->nr, rxport->ser.client->addr);

	return 0;
}
/* Unregister the serializer i2c client added for RX port @nport */
static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
{
	struct ub960_rxport *rxport = priv->rxports[nport];

	i2c_unregister_device(rxport->ser.client);
	rxport->ser.client = NULL;
}
/* Add serializer i2c devices for all initialized ports */
/*
 * Add serializer i2c devices for all active RX ports; on failure, remove
 * the serializers added so far (ports below the failing one).
 */
static int ub960_rxport_add_serializers(struct ub960_data *priv)
{
	unsigned int failed_nport;
	int ret;

	for_each_active_rxport(priv, it) {
		ret = ub960_rxport_add_serializer(priv, it.nport);
		if (ret) {
			failed_nport = it.nport;
			goto err_remove_sers;
		}
	}

	return 0;

err_remove_sers:
	/* Unwind ports failed_nport - 1 down to 0 */
	while (failed_nport--) {
		struct ub960_rxport *rxport = priv->rxports[failed_nport];

		if (!rxport)
			continue;

		ub960_rxport_remove_serializer(priv, failed_nport);
	}

	return ret;
}
/* Remove the serializer i2c devices of all active RX ports */
static void ub960_rxport_remove_serializers(struct ub960_data *priv)
{
	for_each_active_rxport(priv, it)
		ub960_rxport_remove_serializer(priv, it.nport);
}
/*
 * Program the CSI_CTL register of one TX port: skew calibration (at
 * 1.6 Gbps), lane count and clock mode, as parsed from the endpoint.
 */
static int ub960_init_tx_port(struct ub960_data *priv,
			      struct ub960_txport *txport)
{
	unsigned int nport = txport->nport;
	u8 csi_ctl = 0;

	/*
	 * From the datasheet: "initial CSI Skew-Calibration
	 * sequence [...] should be set when operating at 1.6 Gbps"
	 */
	if (priv->tx_data_rate == MHZ(1600))
		csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN;

	/* Lane count field encodes 4 - num_data_lanes */
	csi_ctl |= (4 - txport->num_data_lanes) << 4;

	if (!txport->non_continous_clk)
		csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;

	return ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl, NULL);
}
/*
 * UB960 TX PLL setup: map the CSI data rate to the CSI_PLL_CTL speed
 * select value. Unknown rates fall back to the 1600 Mbps setting (0),
 * matching the validation done when parsing the endpoint.
 */
static int ub960_init_tx_ports_ub960(struct ub960_data *priv)
{
	u8 speed_select;

	if (priv->tx_data_rate == MHZ(400))
		speed_select = 3;
	else if (priv->tx_data_rate == MHZ(800))
		speed_select = 2;
	else if (priv->tx_data_rate == MHZ(1200))
		speed_select = 1;
	else /* MHZ(1600) and anything else */
		speed_select = 0;

	return ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, NULL);
}
/*
 * UB9702 TX PLL setup: each supported CSI data rate maps to a triple of
 * speed select, PLL divider and analog PLL divider values, written via
 * the accumulated-error pattern. Unknown rates use the 1600 Mbps values.
 */
static int ub960_init_tx_ports_ub9702(struct ub960_data *priv)
{
	u8 speed_select;
	u8 ana_pll_div;
	u8 pll_div;
	int ret = 0;

	switch (priv->tx_data_rate) {
	case MHZ(400):
		speed_select = 3;
		pll_div = 0x10;
		ana_pll_div = 0xa2;
		break;
	case MHZ(800):
		speed_select = 2;
		pll_div = 0x10;
		ana_pll_div = 0x92;
		break;
	case MHZ(1200):
		speed_select = 1;
		pll_div = 0x18;
		ana_pll_div = 0x90;
		break;
	case MHZ(1500):
		speed_select = 0;
		pll_div = 0x0f;
		ana_pll_div = 0x82;
		break;
	case MHZ(2500):
		speed_select = 0x10;
		pll_div = 0x19;
		ana_pll_div = 0x80;
		break;
	case MHZ(1600):
	default:
		speed_select = 0;
		pll_div = 0x10;
		ana_pll_div = 0x82;
		break;
	}

	ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, &ret);
	ub960_write(priv, UB9702_SR_CSI_PLL_DIV, pll_div, &ret);
	ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA,
			UB9702_IR_CSI_ANA_CSIPLL_REG_1, ana_pll_div, &ret);

	return ret;
}
/*
 * Initialize the CSI TX side: program the hardware-dependent PLL
 * settings, then configure each TX port that was parsed from the
 * firmware node.
 */
static int ub960_init_tx_ports(struct ub960_data *priv)
{
	int ret;

	if (priv->hw_data->is_ub9702)
		ret = ub960_init_tx_ports_ub9702(priv);
	else
		ret = ub960_init_tx_ports_ub960(priv);

	if (ret)
		return ret;

	for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
	     nport++) {
		struct ub960_txport *txport = priv->txports[nport];

		if (!txport)
			continue;

		ret = ub960_init_tx_port(priv, txport);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Initial configuration of one UB960 RX port: back-channel frequency,
 * FPD-Link 3 mode, sync polarities, interrupt enables, i2c pass-through,
 * serializer alias, EQ settings, then port enable. Most register accesses
 * use the accumulated-error pattern via &ret.
 */
static int ub960_init_rx_port_ub960(struct ub960_data *priv,
				    struct ub960_rxport *rxport)
{
	unsigned int nport = rxport->nport;
	u32 bc_freq_val;
	int ret = 0;

	/*
	 * Back channel frequency select.
	 * Override FREQ_SELECT from the strap.
	 * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1)
	 * 2 - 10 Mbps
	 * 6 - 50 Mbps (DS90UB953-Q1)
	 *
	 * Note that changing this setting will result in some errors on the back
	 * channel for a short period of time.
	 */
	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		bc_freq_val = 0;
		break;

	case RXPORT_MODE_CSI2_NONSYNC:
		bc_freq_val = 2;
		break;

	case RXPORT_MODE_CSI2_SYNC:
		bc_freq_val = 6;
		break;

	default:
		return -EINVAL;
	}

	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
				 bc_freq_val, &ret);

	switch (rxport->rx_mode) {
	case RXPORT_MODE_RAW10:
		/* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
					 UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
					 0x3, &ret);

		/*
		 * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
		 */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
					 UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
					 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT,
					 &ret);

		break;

	case RXPORT_MODE_RAW12_HF:
	case RXPORT_MODE_RAW12_LF:
		/* Not implemented */
		return -EINVAL;

	case RXPORT_MODE_CSI2_SYNC:
	case RXPORT_MODE_CSI2_NONSYNC:
		/* CSI-2 Mode (DS90UB953-Q1 compatible) */
		ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
					 0x0, &ret);

		break;
	}

	/* LV_POLARITY & FV_POLARITY */
	ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
				 rxport->lv_fv_pol, &ret);

	/* Enable all interrupt sources from this port */
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07, &ret);
	ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f, &ret);

	/* Enable I2C_PASS_THROUGH */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
				 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);

	/* Enable I2C communication to the serializer via the alias addr */
	ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
			   rxport->ser.alias << 1, &ret);

	if (ret)
		return ret;

	/* Configure EQ related settings; propagate its error code */
	ret = ub960_rxport_config_eq(priv, nport);
	if (ret)
		return ret;

	/* Enable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
			  &ret);

	return ret;
}
/*
 * UB960 RX side bring-up: configure every active port, perform a digital
 * reset (registers retained), wait for all active ports to lock, apply
 * the serializer temperature ramp, and finally clear any errors latched
 * while the ports were being reconfigured.
 */
static int ub960_init_rx_ports_ub960(struct ub960_data *priv)
{
	struct device *dev = &priv->client->dev;
	unsigned int port_lock_mask;
	unsigned int port_mask;
	int ret;

	for_each_active_rxport(priv, it) {
		ret = ub960_init_rx_port_ub960(priv, it.rxport);
		if (ret)
			return ret;
	}

	ret = ub960_reset(priv, false);
	if (ret)
		return ret;

	port_mask = 0;

	for_each_active_rxport(priv, it)
		port_mask |= BIT(it.nport);

	ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
	if (ret)
		return ret;

	if (port_mask != port_lock_mask) {
		ret = -EIO;
		dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
		return ret;
	}

	/* Set temperature ramp on serializer */
	for_each_active_rxport(priv, it) {
		ret = ub960_serializer_temp_ramp(it.rxport);
		if (ret)
			return ret;

		/* Re-enable passthrough (temp ramp disables it at the end) */
		ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
					 UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
					 &ret);
		if (ret)
			return ret;
	}

	/*
	 * Clear any errors caused by switching the RX port settings while
	 * probing.
	 */
	ret = ub960_clear_rx_errors(priv);
	if (ret)
		return ret;

	return 0;
}
/*
* UB9702 specific initial RX port configuration
*/
/*
 * Fully power down RX port @nport on the UB9702: disable the port, the
 * FPD receiver, back-channel TX, internal RX blocks, AEQ/PI/oDAC/DFE in
 * the indirect analog registers, and the FPD3 CDR. Magic values follow
 * the TI configuration sequence -- TODO confirm against TI documentation.
 */
static int ub960_turn_off_rxport_ub9702(struct ub960_data *priv,
					unsigned int nport)
{
	int ret = 0;

	/* Disable RX port */
	ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), 0, &ret);

	/* Disable FPD Rx and FPD BC CMR */
	ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_2, 0x1b, &ret);

	/* Disable FPD BC Tx */
	ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, BIT(4), 0,
				 &ret);

	/* Disable internal RX blocks */
	ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_1, 0x15, &ret);

	/* Disable AEQ */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_2, 0x03, &ret);

	/* PI disabled and oDAC disabled */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_4, 0x09, &ret);

	/* AEQ configured for disabled link */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_1, 0x20, &ret);

	/* disable AEQ clock and DFE */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_AEQ_CFG_3, 0x45, &ret);

	/* Powerdown FPD3 CDR */
	ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5, 0x82, &ret);

	return ret;
}
static int ub960_set_bc_drv_config_ub9702(struct ub960_data *priv,
unsigned int nport)
{
u8 fpd_bc_ctl0;
u8 fpd_bc_ctl1;
u8 fpd_bc_ctl2;
int ret = 0;
if (priv->rxports[nport]->cdr_mode == RXPORT_CDR_FPD4) {
/* Set FPD PBC drv into FPD IV mode */
fpd_bc_ctl0 = 0;
fpd_bc_ctl1 = 0;
fpd_bc_ctl2 = 0;
} else {
/* Set FPD PBC drv into FPD III mode */
fpd_bc_ctl0 = 2;
fpd_bc_ctl1 = 1;
fpd_bc_ctl2 = 5;
}
ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
UB9702_IR_RX_ANA_FPD_BC_CTL0, GENMASK(7, 5),
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=97 H=92 G=94
¤ Dauer der Verarbeitung: 0.13 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland