From 21618dd6df92faa3fd6fa7b8ea748f1b52ccdba4 Mon Sep 17 00:00:00 2001 From: Dorian Stoll Date: Sun, 11 Dec 2022 12:10:54 +0100 Subject: [PATCH] hid: Add support for Intel Touch Host Controller Based on quo/ithc-linux@67ebf09 Signed-off-by: Dorian Stoll Patchset: ithc --- drivers/hid/Kconfig | 2 + drivers/hid/Makefile | 1 + drivers/hid/ithc/Kbuild | 6 + drivers/hid/ithc/Kconfig | 12 + drivers/hid/ithc/ithc-api.c | 123 ++++++++ drivers/hid/ithc/ithc-api.h | 16 + drivers/hid/ithc/ithc-debug.c | 95 ++++++ drivers/hid/ithc/ithc-dma.c | 276 +++++++++++++++++ drivers/hid/ithc/ithc-dma.h | 80 +++++ drivers/hid/ithc/ithc-dump.c | 67 ++++ drivers/hid/ithc/ithc-main.c | 566 ++++++++++++++++++++++++++++++++++ drivers/hid/ithc/ithc-regs.c | 64 ++++ drivers/hid/ithc/ithc-regs.h | 186 +++++++++++ drivers/hid/ithc/ithc.h | 68 ++++ 14 files changed, 1562 insertions(+) create mode 100644 drivers/hid/ithc/Kbuild create mode 100644 drivers/hid/ithc/Kconfig create mode 100644 drivers/hid/ithc/ithc-api.c create mode 100644 drivers/hid/ithc/ithc-api.h create mode 100644 drivers/hid/ithc/ithc-debug.c create mode 100644 drivers/hid/ithc/ithc-dma.c create mode 100644 drivers/hid/ithc/ithc-dma.h create mode 100644 drivers/hid/ithc/ithc-dump.c create mode 100644 drivers/hid/ithc/ithc-main.c create mode 100644 drivers/hid/ithc/ithc-regs.c create mode 100644 drivers/hid/ithc/ithc-regs.h create mode 100644 drivers/hid/ithc/ithc.h diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 5cdf0eb4c5fa8..e53ad2c4582fa 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -1338,4 +1338,6 @@ source "drivers/hid/surface-hid/Kconfig" source "drivers/hid/ipts/Kconfig" +source "drivers/hid/ithc/Kconfig" + endmenu diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index a58e90cc50402..1181fc7b2a2ff 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -161,3 +161,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/ obj-$(CONFIG_HID_IPTS) += ipts/ +obj-$(CONFIG_HID_ITHC) += ithc/ diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild new file mode 100644 index 0000000000000..429dcc67209ce --- /dev/null +++ b/drivers/hid/ithc/Kbuild @@ -0,0 +1,6 @@ +obj-m := ithc.o + +ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-api.o ithc-debug.o + +ccflags-y := -std=gnu11 -Wno-declaration-after-statement + diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig new file mode 100644 index 0000000000000..ede7130236096 --- /dev/null +++ b/drivers/hid/ithc/Kconfig @@ -0,0 +1,12 @@ +config HID_ITHC + tristate "Intel Touch Host Controller" + depends on PCI + depends on HID + help + Say Y here if your system has a touchscreen using Intels + Touch Host Controller (ITHC / IPTS) technology. + + If unsure say N. + + To compile this driver as a module, choose M here: the + module will be called ithc. diff --git a/drivers/hid/ithc/ithc-api.c b/drivers/hid/ithc/ithc-api.c new file mode 100644 index 0000000000000..136205ecd631c --- /dev/null +++ b/drivers/hid/ithc/ithc-api.c @@ -0,0 +1,123 @@ +#include "ithc.h" + +// Userspace API: +// All data received on the active DMA RX channel is made available to userspace through character device /dev/ithc (/dev/ithc[01] if both channels are active). +// The chardev is read-only and has no ioctls. Multitouch mode is automatically activated when the device is opened and deactivated when it is closed. +// Reading from the chardev will obtain one or more messages containing data, or block if there is no new data. 
+// Only complete messages can be read; if the provided user buffer is too small, read() will return -EMSGSIZE instead of performing a partial read. +// Each message has a header (struct ithc_api_header) containing a sequential message number and the size of the data. + +static loff_t ithc_api_llseek(struct file *f, loff_t offset, int whence) { + struct ithc_api *a = container_of(f->private_data, struct ithc_api, m); + return generic_file_llseek_size(f, offset, whence, MAX_LFS_FILESIZE, READ_ONCE(a->rx->pos)); +} + +static ssize_t ithc_api_read_unlocked(struct file *f, char __user *buf, size_t size, loff_t *offset) { + struct ithc_api *a = container_of(f->private_data, struct ithc_api, m); + loff_t pos = READ_ONCE(a->rx->pos); + unsigned n = a->rx->num_received; + unsigned newest = (n + NUM_RX_ALLOC - 1) % NUM_RX_ALLOC; + unsigned oldest = (n + NUM_RX_DEV) % NUM_RX_ALLOC; + unsigned i = newest; + + // scan backwards to find which buf we need to copy first + while (1) { + pos -= sizeof(struct ithc_api_header) + a->rx->bufs[i].data_size; + if (pos <= *offset) break; + if (i == oldest) break; + i = i ? i-1 : NUM_RX_ALLOC-1; + n--; + } + + // copy as many bufs as possible + size_t nread = 0; + while (nread + sizeof(struct ithc_api_header) + a->rx->bufs[i].data_size <= size) { + struct ithc_dma_data_buffer *b = &a->rx->bufs[i]; + if (b->active_idx >= 0) { pci_err(a->ithc->pci, "tried to access active buffer\n"); return -EFAULT; } // should be impossible + struct ithc_api_header hdr = { .hdr_size = sizeof hdr, .msg_num = n, .size = b->data_size }; + if (copy_to_user(buf + nread, &hdr, sizeof hdr)) return -EFAULT; + nread += sizeof hdr; + + if (copy_to_user(buf + nread, b->addr, b->data_size)) return -EFAULT; + nread += b->data_size; + + if (i == newest) break; + if (++i == NUM_RX_ALLOC) i = 0; + n++; + } + if (!nread) return -EMSGSIZE; // user buffer is too small + + *offset = pos + nread; + return nread; +} +static ssize_t ithc_api_read(struct file *f, char __user *buf, size_t size, loff_t *offset) { + struct ithc_api *a = container_of(f->private_data, struct ithc_api, m); + if (f->f_pos >= READ_ONCE(a->rx->pos)) { + if (f->f_flags & O_NONBLOCK) return -EWOULDBLOCK; + if (wait_event_interruptible(a->rx->wait, f->f_pos < READ_ONCE(a->rx->pos))) return -ERESTARTSYS; + } + // lock to avoid buffers getting transferred to device while we're reading them + if (mutex_lock_interruptible(&a->rx->mutex)) return -ERESTARTSYS; + int ret = ithc_api_read_unlocked(f, buf, size, offset); + mutex_unlock(&a->rx->mutex); + return ret; +} + +static __poll_t ithc_api_poll(struct file *f, struct poll_table_struct *pt) { + struct ithc_api *a = container_of(f->private_data, struct ithc_api, m); + poll_wait(f, &a->rx->wait, pt); + if (f->f_pos < READ_ONCE(a->rx->pos)) return POLLIN; + return 0; +} + +static int ithc_api_open(struct inode *n, struct file *f) { + struct ithc_api *a = container_of(f->private_data, struct ithc_api, m); + struct ithc *ithc = a->ithc; + if (atomic_fetch_inc(&a->open_count) == 0) CHECK(ithc_set_multitouch, ithc, true); + return 0; +} + +static int ithc_api_release(struct inode *n, struct file *f) { + struct ithc_api *a = container_of(f->private_data, struct ithc_api, m); + struct ithc *ithc = a->ithc; + int c = atomic_dec_return(&a->open_count); + if (c == 0) CHECK(ithc_set_multitouch, ithc, false); + if (c < 0) pci_err(ithc->pci, "open/release mismatch\n"); + return 0; +} + +static const struct file_operations dev_fops = { + .owner = THIS_MODULE, + .llseek = ithc_api_llseek, + .read = 
ithc_api_read, + .poll = ithc_api_poll, + .open = ithc_api_open, + .release = ithc_api_release, +}; + +static void ithc_api_devres_release(struct device *dev, void *res) { + struct ithc_api *a = res; + misc_deregister(&a->m); +} + +int ithc_api_init(struct ithc *ithc, struct ithc_dma_rx *rx, const char *name) { + struct ithc_api *a = devres_alloc(ithc_api_devres_release, sizeof *a, GFP_KERNEL); + if (!a) return -ENOMEM; + a->ithc = ithc; + a->rx = rx; + a->m.minor = MISC_DYNAMIC_MINOR; + a->m.name = name; + a->m.fops = &dev_fops; + a->m.mode = 0440; + a->m.parent = &ithc->pci->dev; + int r = CHECK(misc_register, &a->m); + if (r) { + devres_free(a); + return r; + } + pci_info(ithc->pci, "registered device %s\n", name); + devres_add(&ithc->pci->dev, a); + rx->api = a; + return 0; +} + diff --git a/drivers/hid/ithc/ithc-api.h b/drivers/hid/ithc/ithc-api.h new file mode 100644 index 0000000000000..94fde10b01c1c --- /dev/null +++ b/drivers/hid/ithc/ithc-api.h @@ -0,0 +1,16 @@ +struct ithc_api { + struct ithc *ithc; + struct ithc_dma_rx *rx; + struct miscdevice m; + atomic_t open_count; +}; + +struct ithc_api_header { + u8 hdr_size; + u8 reserved[3]; + u32 msg_num; + u32 size; +}; + +int ithc_api_init(struct ithc *ithc, struct ithc_dma_rx *rx, const char *name); + diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c new file mode 100644 index 0000000000000..b2a2d2e9250e0 --- /dev/null +++ b/drivers/hid/ithc/ithc-debug.c @@ -0,0 +1,95 @@ +#include "ithc.h" + +void ithc_log_regs(struct ithc *ithc) { + if (!ithc->prev_regs) return; + u32 *cur = (void*)ithc->regs, *prev = (void*)ithc->prev_regs; + for (int i = 1024; i < sizeof *ithc->regs / 4; i++) { + u32 x = readl(cur + i); + if (x != prev[i]) { + pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x); + prev[i] = x; + } + } +} + +static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) { + struct ithc *ithc = file_inode(f)->i_private; + char cmd[256]; + if (!ithc || !ithc->pci) return -ENODEV; + if (!len) return -EINVAL; + if (len >= sizeof cmd) return -EINVAL; + if (copy_from_user(cmd, buf, len)) return -EFAULT; + cmd[len] = 0; + if (cmd[len-1] == '\n') cmd[len-1] = 0; + pci_info(ithc->pci, "debug command: %s\n", cmd); + u32 n = 0; + const char *s = cmd + 1; + u32 a[32]; + while (*s && *s != '\n') { + if (n >= ARRAY_SIZE(a)) return -EINVAL; + if (*s++ != ' ') return -EINVAL; + char *e; + a[n++] = simple_strtoul(s, &e, 0); + if (e == s) return -EINVAL; + s = e; + } + ithc_log_regs(ithc); + switch(cmd[0]) { + case 'x': // reset + ithc_reset(ithc); + break; + case 'w': // write register: offset mask value + if (n != 3 || (a[0] & 3)) return -EINVAL; + pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]); + bitsl(((u32 *)ithc->regs) + a[0] / 4, a[1], a[2]); + break; + case 'r': // read register: offset + if (n != 1 || (a[0] & 3)) return -EINVAL; + pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((u32 *)ithc->regs) + a[0] / 4)); + break; + case 's': // spi command: cmd offset len data... + // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + // set touch cfg: s 6 12 4 XX + if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL; + pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]); + if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3)) + for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]); + break; + case 'd': // dma command: cmd len data... 
+ // get report descriptor: d 7 8 0 0 + // enable multitouch: d 3 2 0x0105 + if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL; + pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]); + if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n"); + break; + default: + return -EINVAL; + } + ithc_log_regs(ithc); + return len; +} + +static const struct file_operations ithc_debugfops_cmd = { + .owner = THIS_MODULE, + .write = ithc_debugfs_cmd_write, +}; + +static void ithc_debugfs_devres_release(struct device *dev, void *res) { + struct dentry **dbgm = res; + if (*dbgm) debugfs_remove_recursive(*dbgm); +} + +int ithc_debug_init(struct ithc *ithc) { + struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL); + if (!dbgm) return -ENOMEM; + devres_add(&ithc->pci->dev, dbgm); + struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL); + if (IS_ERR(dbg)) return PTR_ERR(dbg); + *dbgm = dbg; + + struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd); + if (IS_ERR(cmd)) return PTR_ERR(cmd); + + return 0; +} + diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c new file mode 100644 index 0000000000000..983b4fe5ace07 --- /dev/null +++ b/drivers/hid/ithc/ithc-dma.c @@ -0,0 +1,276 @@ +#include "ithc.h" + +static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) { + p->num_pages = num_pages; + p->dir = dir; + p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE); + p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL); + if (!p->addr) return -ENOMEM; + if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT; + return 0; +} + +struct ithc_sg_table { + void *addr; + struct sg_table sgt; + enum dma_data_direction dir; +}; +static void ithc_dma_sgtable_free(struct sg_table *sgt) { + struct scatterlist *sg; + int i; + for_each_sgtable_sg(sgt, sg, i) { + struct page *p = sg_page(sg); + if (p) __free_page(p); + } + sg_free_table(sgt); +} +static void ithc_dma_data_devres_release(struct device *dev, void *res) { + struct ithc_sg_table *sgt = res; + if (sgt->addr) vunmap(sgt->addr); + dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0); + ithc_dma_sgtable_free(&sgt->sgt); +} + +static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) { + // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional). + // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now). 
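+	// Illustrative sketch only (assumes kernel >= 5.13, which is why it is not used here): the page
+	// allocation, sg_table setup, and vmap below could roughly be replaced by the noncontiguous DMA API:
+	//	struct sg_table *sgt = dma_alloc_noncontiguous(&ithc->pci->dev, prds->num_pages * PAGE_SIZE, prds->dir, GFP_KERNEL, 0);
+	//	void *vaddr = sgt ? dma_vmap_noncontiguous(&ithc->pci->dev, prds->num_pages * PAGE_SIZE, sgt) : NULL;
+	//	// ... point the PRDs at sgt->sgl and use dma_sync_sgtable_for_device()/_for_cpu() around transfers ...
+	//	dma_vunmap_noncontiguous(&ithc->pci->dev, vaddr);
+	//	dma_free_noncontiguous(&ithc->pci->dev, prds->num_pages * PAGE_SIZE, sgt, prds->dir);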
+ struct page *pages[16]; + if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL; + b->active_idx = -1; + struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL); + if (!sgt) return -ENOMEM; + sgt->dir = prds->dir; + if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) { + struct scatterlist *sg; + int i; + bool ok = true; + for_each_sgtable_sg(&sgt->sgt, sg, i) { + struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA + if (!p) { ok = false; break; } + sg_set_page(sg, p, PAGE_SIZE, 0); + } + if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) { + devres_add(&ithc->pci->dev, sgt); + b->sgt = &sgt->sgt; + b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL); + if (!b->addr) return -ENOMEM; + return 0; + } + ithc_dma_sgtable_free(&sgt->sgt); + } + devres_free(sgt); + return -ENOMEM; +} + +static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) { + struct ithc_phys_region_desc *prd = prds->addr; + prd += idx * prds->num_pages; + if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; } + b->active_idx = idx; + if (prds->dir == DMA_TO_DEVICE) { + if (b->data_size > PAGE_SIZE) return -EINVAL; + prd->addr = sg_dma_address(b->sgt->sgl) >> 10; + prd->size = b->data_size | PRD_END_FLAG; + flush_kernel_vmap_range(b->addr, b->data_size); + } else if (prds->dir == DMA_FROM_DEVICE) { + struct scatterlist *sg; + int i; + for_each_sgtable_dma_sg(b->sgt, sg, i) { + prd->addr = sg_dma_address(sg) >> 10; + prd->size = sg_dma_len(sg); + prd++; + } + prd[-1].size |= PRD_END_FLAG; + } + dma_wmb(); // for the prds + dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir); + return 0; +} + +static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) { + struct ithc_phys_region_desc *prd = prds->addr; + prd += idx * prds->num_pages; + if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; } + b->active_idx = -1; + if (prds->dir == DMA_FROM_DEVICE) { + dma_rmb(); // for the prds + b->data_size = 0; + struct scatterlist *sg; + int i; + for_each_sgtable_dma_sg(b->sgt, sg, i) { + unsigned size = prd->size; + b->data_size += size & PRD_SIZE_MASK; + if (size & PRD_END_FLAG) break; + if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; } + prd++; + } + invalidate_kernel_vmap_range(b->addr, b->data_size); + } + dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir); + return 0; +} + +int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) { + struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; + mutex_init(&rx->mutex); + init_waitqueue_head(&rx->wait); + u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes); + unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE; + pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_ALLOC, buf_size, num_pages); + CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_ALLOC, num_pages, DMA_FROM_DEVICE); + for (unsigned i = 0; i < NUM_RX_ALLOC; i++) + CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]); + writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2); + lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr); + writeb(NUM_RX_DEV - 1, 
&ithc->regs->dma_rx[channel].num_bufs); + writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds); + u8 head = readb(&ithc->regs->dma_rx[channel].head); + if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; } + for (unsigned i = 0; i < NUM_RX_DEV; i++) + CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i); + writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail); + ithc_api_init(ithc, rx, devname); + return 0; +} +void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) { + bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA); + CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED); +} + +int ithc_dma_tx_init(struct ithc *ithc) { + struct ithc_dma_tx *tx = &ithc->dma_tx; + mutex_init(&tx->mutex); + tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes); + unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE; + pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages); + CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE); + CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf); + lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr); + writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds); + CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); + return 0; +} + +static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) { + if (buf >= NUM_RX_DEV) { + pci_err(ithc->pci, "invalid dma ringbuffer index\n"); + return -EINVAL; + } + ithc_set_active(ithc); + u32 len = data->data_size; + struct ithc_dma_rx_header *hdr = data->addr; + u8 *hiddata = (void *)(hdr + 1); + struct ithc_hid_report_singletouch *st = (void *)hiddata; + if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) { + CHECK(ithc_reset, ithc); + } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) { + if (hdr->code == DMA_RX_CODE_INPUT_REPORT) { + // When the CPU enters a low power state during DMA, we can get truncated messages. + // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes. + // See also ithc_set_active(). + } else { + pci_err(ithc->pci, "invalid dma rx data! 
channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size); + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0); + } + } else if (ithc->input && hdr->code == DMA_RX_CODE_INPUT_REPORT && hdr->data_size == sizeof *st && st->report_id == HID_REPORT_ID_SINGLETOUCH) { + input_report_key(ithc->input, BTN_TOUCH, st->button); + input_report_abs(ithc->input, ABS_X, st->x); + input_report_abs(ithc->input, ABS_Y, st->y); + input_sync(ithc->input); + } else if (ithc->hid && hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) { + CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8); + WRITE_ONCE(ithc->hid_parse_done, true); + wake_up(&ithc->wait_hid_parse); + } else if (ithc->hid && hdr->code == DMA_RX_CODE_INPUT_REPORT) { + CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1); + } else if (ithc->hid && hdr->code == DMA_RX_CODE_FEATURE_REPORT) { + bool done = false; + mutex_lock(&ithc->hid_get_feature_mutex); + if (ithc->hid_get_feature_buf) { + if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size; + memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size); + ithc->hid_get_feature_buf = NULL; + done = true; + } + mutex_unlock(&ithc->hid_get_feature_mutex); + if (done) wake_up(&ithc->wait_hid_get_feature); + else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1); + } else { + pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code); + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0); + } + return 0; +} + +static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) { + struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; + // buf idx tail..tail+NUM_RX_DEV-1 are mapped as dev idx tail%NUM_RX_DEV.. + // tail+NUM_RX_DEV = oldest unmapped buffer + unsigned n = rx->num_received; + u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head); + while (1) { + u8 tail = n % NUM_RX_DEV; + u8 tail_wrap = tail | ((n / NUM_RX_DEV) & 1 ? 0 : DMA_RX_WRAP_FLAG); + writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail); + // ringbuffer is full if tail_wrap == head_wrap + // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG + if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0; + + // take the buffer that the device just filled + struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_ALLOC]; + CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail); + // give the oldest buffer we have back to the device to replace the buffer we took + CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[(n + NUM_RX_DEV) % NUM_RX_ALLOC], tail); + rx->num_received = ++n; + WRITE_ONCE(rx->pos, READ_ONCE(rx->pos) + sizeof(struct ithc_api_header) + b->data_size); + + // process data + CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail); + + wake_up(&rx->wait); + } +} +int ithc_dma_rx(struct ithc *ithc, u8 channel) { + struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; + mutex_lock(&rx->mutex); + int ret = ithc_dma_rx_unlocked(ithc, channel); + mutex_unlock(&rx->mutex); + return ret; +} + +static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) { + pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize); + struct ithc_dma_tx_header *hdr; + u8 padding = datasize & 3 ? 
4 - (datasize & 3) : 0; + unsigned fullsize = sizeof *hdr + datasize + padding; + if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL; + CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); + + ithc->dma_tx.buf.data_size = fullsize; + hdr = ithc->dma_tx.buf.addr; + hdr->code = cmdcode; + hdr->data_size = datasize; + u8 *dest = (void *)(hdr + 1); + memcpy(dest, data, datasize); + dest += datasize; + for (u8 p = 0; p < padding; p++) *dest++ = 0; + CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); + + bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND); + CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); + writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status); + return 0; +} +int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) { + mutex_lock(&ithc->dma_tx.mutex); + int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data); + mutex_unlock(&ithc->dma_tx.mutex); + return ret; +} + +int ithc_set_multitouch(struct ithc *ithc, bool enable) { + pci_info(ithc->pci, "%s multi-touch mode\n", enable ? "enabling" : "disabling"); + struct ithc_hid_feature_multitouch data = { .feature_id = HID_FEATURE_ID_MULTITOUCH, .enable = enable }; + return ithc_dma_tx(ithc, DMA_TX_CODE_SET_FEATURE, sizeof data, &data); +} + diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h new file mode 100644 index 0000000000000..cba2cac28d734 --- /dev/null +++ b/drivers/hid/ithc/ithc-dma.h @@ -0,0 +1,80 @@ +#define PRD_SIZE_MASK 0xffffff +#define PRD_END_FLAG 0x1000000 +struct ithc_phys_region_desc { + u64 addr; // physical addr/1024 + u32 size; // num bytes, PRD_END_FLAG marks last prd for data split over multiple prds + u32 unused; +}; + +#define DMA_RX_CODE_INPUT_REPORT 3 +#define DMA_RX_CODE_FEATURE_REPORT 4 +#define DMA_RX_CODE_REPORT_DESCRIPTOR 5 +#define DMA_RX_CODE_RESET 7 +struct ithc_dma_rx_header { + u32 code; + u32 data_size; + u32 _unknown[14]; +}; + +#define DMA_TX_CODE_SET_FEATURE 3 +#define DMA_TX_CODE_GET_FEATURE 4 +#define DMA_TX_CODE_OUTPUT_REPORT 5 +#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7 +struct ithc_dma_tx_header { + u32 code; + u32 data_size; +}; + +#define HID_REPORT_ID_SINGLETOUCH 0x40 +struct ithc_hid_report_singletouch { + u8 report_id; + u8 button; + u16 x; + u16 y; +}; + +#define HID_FEATURE_ID_MULTITOUCH 5 +struct ithc_hid_feature_multitouch { + u8 feature_id; + u8 enable; +}; + +struct ithc_dma_prd_buffer { + void *addr; + dma_addr_t dma_addr; + u32 size; + u32 num_pages; // per data buffer + enum dma_data_direction dir; +}; + +struct ithc_dma_data_buffer { + void *addr; + struct sg_table *sgt; + int active_idx; + u32 data_size; +}; + +struct ithc_dma_tx { + struct mutex mutex; + u32 max_size; + struct ithc_dma_prd_buffer prds; + struct ithc_dma_data_buffer buf; +}; + +struct ithc_dma_rx { + struct mutex mutex; + u32 num_received; + loff_t pos; + struct ithc_api *api; + struct ithc_dma_prd_buffer prds; + struct ithc_dma_data_buffer bufs[NUM_RX_ALLOC]; + wait_queue_head_t wait; +}; + +int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname); +void ithc_dma_rx_enable(struct ithc *ithc, u8 channel); +int ithc_dma_tx_init(struct ithc *ithc); +int ithc_dma_rx(struct ithc *ithc, u8 channel); +int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata); +int ithc_set_multitouch(struct ithc *ithc, bool enable); + diff --git a/drivers/hid/ithc/ithc-dump.c b/drivers/hid/ithc/ithc-dump.c new file mode 
100644
index 0000000000000..03caca329a42f
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dump.c
@@ -0,0 +1,67 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+// Simple debugging utility which dumps data from /dev/ithc.
+
+int main(int argc, char **argv) {
+	if (argc != 2) {
+		fprintf(stderr, "usage: %s DEVICE\n", argv[0]);
+		return 1;
+	}
+
+	int fd = open(argv[1], O_RDONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+	//lseek(fd, 0, SEEK_END);
+
+	uint8_t buf[0x10000];
+	uint32_t next = 0;
+	while (1) {
+		ssize_t r = read(fd, buf, sizeof buf);
+		if (r < 0) {
+			perror("read");
+			return 1;
+		}
+		ssize_t i = 0;
+		while (i < r) {
+			uint32_t *hdr = (void *)(buf + i);
+			// api header
+			uint32_t hdr_size = hdr[0] & 0xff;
+			uint32_t msg_num = hdr[1];
+			uint32_t size = hdr[2];
+			if (msg_num != next) fprintf(stderr, "missed messages %"PRIu32" to %"PRIu32"\n", next, msg_num-1);
+			next = msg_num + 1;
+			printf("%10"PRIu32" %5"PRIu32":", msg_num, size);
+			i += hdr_size;
+			hdr += hdr_size / 4;
+			// device header
+			uint32_t nhdr = 16;
+			if (size < nhdr * 4) nhdr = size / 4;
+			for (uint32_t j = 0; j < nhdr; j++) printf(j == 1 ? " %4"PRIu32 : " %"PRIu32, hdr[j]);
+			i += 4 * nhdr;
+			size -= 4 * nhdr;
+			// device data
+			printf(" |");
+			uint32_t nprint = 32;
+			if (size < nprint) nprint = size;
+			for (uint32_t j = 0; j < nprint; j++) printf(" %02x", buf[i + j]);
+			if (nprint < size) printf(" + %"PRIu32" B", size - nprint);
+			i += size;
+			printf("\n");
+		}
+		if (i != r) {
+			fprintf(stderr, "position mismatch\n");
+			return 1;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
new file mode 100644
index 0000000000000..370906f78c3f3
--- /dev/null
+++ b/drivers/hid/ithc/ithc-main.c
@@ -0,0 +1,566 @@
+#include "ithc.h"
+
+MODULE_DESCRIPTION("Intel Touch Host Controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+// Lakefield
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
+// Tiger Lake
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
+// Alder Lake
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
+// Raptor Lake
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
+// Meteor Lake
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
+
+static const struct pci_device_id ithc_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
+	{
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) }, + {} +}; +MODULE_DEVICE_TABLE(pci, ithc_pci_tbl); + +// Module parameters + +static bool ithc_use_polling = false; +module_param_named(poll, ithc_use_polling, bool, 0); +MODULE_PARM_DESC(poll, "Use polling instead of interrupts"); + +static bool ithc_use_hid = true; +module_param_named(hid, ithc_use_hid, bool, 0); +MODULE_PARM_DESC(hid, "Operate as an HID transport driver instead of an input driver"); + +static bool ithc_use_rx0 = false; +module_param_named(rx0, ithc_use_rx0, bool, 0); +MODULE_PARM_DESC(rx0, "Use DMA RX channel 0"); + +static bool ithc_use_rx1 = true; +module_param_named(rx1, ithc_use_rx1, bool, 0); +MODULE_PARM_DESC(rx1, "Use DMA RX channel 1"); + +static bool ithc_log_regs_enabled = false; +module_param_named(logregs, ithc_log_regs_enabled, bool, 0); +MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)"); + +// Sysfs attributes + +static bool ithc_is_config_valid(struct ithc *ithc) { + return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC; +} + +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct ithc *ithc = dev_get_drvdata(dev); + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; + return sprintf(buf, "0x%04x", ithc->config.vendor_id); +} +DEVICE_ATTR_RO(vendor); +static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct ithc *ithc = dev_get_drvdata(dev); + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; + return sprintf(buf, "0x%04x", ithc->config.product_id); +} +DEVICE_ATTR_RO(product); +static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct ithc *ithc = dev_get_drvdata(dev); + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; + return sprintf(buf, "%u", ithc->config.revision); +} +DEVICE_ATTR_RO(revision); +static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct ithc *ithc = dev_get_drvdata(dev); + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; + u32 v = ithc->config.fw_version; + return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff); +} +DEVICE_ATTR_RO(fw_version); + +static const struct attribute_group *ithc_attribute_groups[] = { + &(const struct attribute_group){ + .name = DEVNAME, + .attrs = (struct attribute *[]){ + &dev_attr_vendor.attr, + &dev_attr_product.attr, + &dev_attr_revision.attr, + &dev_attr_fw_version.attr, + NULL + }, + }, + NULL +}; + +// HID/input setup + +static int ithc_hid_start(struct hid_device *hdev) { return 0; } +static void ithc_hid_stop(struct hid_device *hdev) { } +static int ithc_hid_open(struct hid_device *hdev) { return 0; } +static void ithc_hid_close(struct hid_device *hdev) { } + +static int ithc_hid_parse(struct hid_device *hdev) { + struct ithc *ithc = hdev->driver_data; + u64 val = 0; + WRITE_ONCE(ithc->hid_parse_done, false); + CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val); + if 
(!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT; + return 0; +} + +static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { + struct ithc *ithc = hdev->driver_data; + if (!buf || !len) return -EINVAL; + u32 code; + if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT; + else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE; + else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE; + else { + pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum); + return -EINVAL; + } + buf[0] = reportnum; + if (reqtype == HID_REQ_GET_REPORT) { + mutex_lock(&ithc->hid_get_feature_mutex); + ithc->hid_get_feature_buf = buf; + ithc->hid_get_feature_size = len; + mutex_unlock(&ithc->hid_get_feature_mutex); + int r = CHECK(ithc_dma_tx, ithc, code, 1, buf); + if (!r) { + r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000)); + if (!r) r = -ETIMEDOUT; + else if (r < 0) r = -EINTR; + else r = 0; + } + mutex_lock(&ithc->hid_get_feature_mutex); + ithc->hid_get_feature_buf = NULL; + if (!r) r = ithc->hid_get_feature_size; + mutex_unlock(&ithc->hid_get_feature_mutex); + return r; + } + CHECK_RET(ithc_dma_tx, ithc, code, len, buf); + return 0; +} + +static struct hid_ll_driver ithc_ll_driver = { + .start = ithc_hid_start, + .stop = ithc_hid_stop, + .open = ithc_hid_open, + .close = ithc_hid_close, + .parse = ithc_hid_parse, + .raw_request = ithc_hid_raw_request, +}; + +static void ithc_hid_devres_release(struct device *dev, void *res) { + struct hid_device **hidm = res; + if (*hidm) hid_destroy_device(*hidm); +} + +static int ithc_hid_init(struct ithc *ithc) { + struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL); + if (!hidm) return -ENOMEM; + devres_add(&ithc->pci->dev, hidm); + struct hid_device *hid = hid_allocate_device(); + if (IS_ERR(hid)) return PTR_ERR(hid); + *hidm = hid; + + strscpy(hid->name, DEVFULLNAME, sizeof(hid->name)); + strscpy(hid->phys, ithc->phys, sizeof(hid->phys)); + hid->ll_driver = &ithc_ll_driver; + hid->bus = BUS_PCI; + hid->vendor = ithc->config.vendor_id; + hid->product = ithc->config.product_id; + hid->version = 0x100; + hid->dev.parent = &ithc->pci->dev; + hid->driver_data = ithc; + + ithc->hid = hid; + return 0; +} + +static int ithc_input_init(struct ithc *ithc) { + struct input_dev *d = devm_input_allocate_device(&ithc->pci->dev); + if (!d) return -ENOMEM; + + d->name = DEVFULLNAME; + d->phys = ithc->phys; + d->id.bustype = BUS_PCI; + d->id.vendor = ithc->config.vendor_id; + d->id.product = ithc->config.product_id; + d->id.version = ithc->config.revision; + d->dev.parent = &ithc->pci->dev; + + __set_bit(INPUT_PROP_DIRECT, d->propbit); + input_set_capability(d, EV_KEY, BTN_TOUCH); + input_set_abs_params(d, ABS_X, 0, 32767, 0, 0); + input_set_abs_params(d, ABS_Y, 0, 32767, 0, 0); + + CHECK_RET(input_register_device, d); + + ithc->input = d; + return 0; +} + +// Interrupts/polling + +static void ithc_activity_timer_callback(struct timer_list *t) { + struct ithc *ithc = container_of(t, struct ithc, activity_timer); + cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE); +} + +void ithc_set_active(struct ithc *ithc) { + // When CPU usage 
is very low, the CPU can enter various low power states (C2-C10). + // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens. + // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor). + // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions. + cpu_latency_qos_update_request(&ithc->activity_qos, 0); + mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000)); +} + +static int ithc_set_device_enabled(struct ithc *ithc, bool enable) { + u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 + | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0); + return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x); +} + +static void ithc_disable_interrupts(struct ithc *ithc) { + writel(0, &ithc->regs->error_control); + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0); + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0); + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0); + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0); +} + +static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) { + writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status); +} + +static void ithc_clear_interrupts(struct ithc *ithc) { + writel(0xffffffff, &ithc->regs->error_flags); + writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status); + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); + ithc_clear_dma_rx_interrupts(ithc, 0); + ithc_clear_dma_rx_interrupts(ithc, 1); + writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status); +} + +static void ithc_process(struct ithc *ithc) { + ithc_log_regs(ithc); + + // read and clear error bits + u32 err = readl(&ithc->regs->error_flags); + if (err) { + if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err); + writel(err, &ithc->regs->error_flags); + } + + // process DMA rx + if (ithc_use_rx0) { + ithc_clear_dma_rx_interrupts(ithc, 0); + ithc_dma_rx(ithc, 0); + } + if (ithc_use_rx1) { + ithc_clear_dma_rx_interrupts(ithc, 1); + ithc_dma_rx(ithc, 1); + } + + ithc_log_regs(ithc); +} + +static irqreturn_t ithc_interrupt_thread(int irq, void *arg) { + struct ithc *ithc = arg; + pci_dbg(ithc->pci, "IRQ! 
err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n", + readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags), + readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status), + readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status), + readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status), + readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status)); + ithc_process(ithc); + return IRQ_HANDLED; +} + +static int ithc_poll_thread(void *arg) { + struct ithc *ithc = arg; + unsigned sleep = 100; + while (!kthread_should_stop()) { + u32 n = ithc->dma_rx[1].num_received; + ithc_process(ithc); + if (n != ithc->dma_rx[1].num_received) sleep = 20; + else sleep = min(200u, sleep + (sleep >> 4) + 1); + msleep_interruptible(sleep); + } + return 0; +} + +// Device initialization and shutdown + +static void ithc_disable(struct ithc *ithc) { + bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE); + CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED); + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0); + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0); + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0); + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0); + ithc_disable_interrupts(ithc); + ithc_clear_interrupts(ithc); +} + +static int ithc_init_device(struct ithc *ithc) { + ithc_log_regs(ithc); + bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0; + ithc_disable(ithc); + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY); + ithc_set_spi_config(ithc, 10, 0); + bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config + + if (was_enabled) if (msleep_interruptible(100)) return -EINTR; + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0); + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0); + for (int retries = 0; ; retries++) { + ithc_log_regs(ithc); + bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET); + if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break; + if (retries > 5) { + pci_err(ithc->pci, "too many retries, failed to reset device\n"); + return -ETIMEDOUT; + } + pci_err(ithc->pci, "invalid state, retrying reset\n"); + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0); + if (msleep_interruptible(1000)) return -EINTR; + } + ithc_log_regs(ithc); + + CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4); + + // read config + for (int retries = 0; ; retries++) { + ithc_log_regs(ithc); + memset(&ithc->config, 0, sizeof ithc->config); + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config); + u32 *p = (void *)&ithc->config; + pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); + if (ithc_is_config_valid(ithc)) break; + if (retries > 10) { + pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id); + return -EIO; + } + pci_err(ithc->pci, "failed to read config, retrying\n"); + if (msleep_interruptible(100)) return -EINTR; + } + ithc_log_regs(ithc); + + CHECK_RET(ithc_set_spi_config, ithc, 
DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config)); + CHECK_RET(ithc_set_device_enabled, ithc, true); + ithc_log_regs(ithc); + return 0; +} + +int ithc_reset(struct ithc *ithc) { + pci_err(ithc->pci, "reset\n"); + CHECK(ithc_init_device, ithc); + if (ithc_use_rx0) { + ithc_dma_rx_enable(ithc, 0); + if (ithc->dma_rx[0].api && atomic_read(&ithc->dma_rx[0].api->open_count)) CHECK(ithc_set_multitouch, ithc, true); + } + if (ithc_use_rx1) { + ithc_dma_rx_enable(ithc, 1); + if (ithc->dma_rx[1].api && atomic_read(&ithc->dma_rx[1].api->open_count)) CHECK(ithc_set_multitouch, ithc, true); + } + ithc_log_regs(ithc); + pci_dbg(ithc->pci, "reset completed\n"); + return 0; +} + +static void ithc_stop(void *res) { + struct ithc *ithc = res; + pci_dbg(ithc->pci, "stopping\n"); + ithc_log_regs(ithc); + if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread); + if (ithc->irq >= 0) disable_irq(ithc->irq); + CHECK(ithc_set_device_enabled, ithc, false); + ithc_disable(ithc); + del_timer_sync(&ithc->activity_timer); + cpu_latency_qos_remove_request(&ithc->activity_qos); + // clear dma config + for(unsigned i = 0; i < 2; i++) { + CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0); + lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr); + writeb(0, &ithc->regs->dma_rx[i].num_bufs); + writeb(0, &ithc->regs->dma_rx[i].num_prds); + } + lo_hi_writeq(0, &ithc->regs->dma_tx.addr); + writeb(0, &ithc->regs->dma_tx.num_prds); + ithc_log_regs(ithc); + pci_dbg(ithc->pci, "stopped\n"); +} + +static void ithc_clear_drvdata(void *res) { + struct pci_dev *pci = res; + pci_set_drvdata(pci, NULL); +} + +static int ithc_start(struct pci_dev *pci) { + pci_dbg(pci, "starting\n"); + if (pci_get_drvdata(pci)) { + pci_err(pci, "device already initialized\n"); + return -EINVAL; + } + if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM; + + struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL); + if (!ithc) return -ENOMEM; + ithc->irq = -1; + ithc->pci = pci; + snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci)); + init_waitqueue_head(&ithc->wait_hid_parse); + init_waitqueue_head(&ithc->wait_hid_get_feature); + mutex_init(&ithc->hid_get_feature_mutex); + pci_set_drvdata(pci, ithc); + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci); + if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL); + + CHECK_RET(pcim_enable_device, pci); + pci_set_master(pci); + CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs"); + CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64)); + CHECK_RET(pci_set_power_state, pci, PCI_D0); + ithc->regs = pcim_iomap_table(pci)[0]; + + if (!ithc_use_polling) { + CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX); + ithc->irq = CHECK(pci_irq_vector, pci, 0); + if (ithc->irq < 0) return ithc->irq; + } + + CHECK_RET(ithc_init_device, ithc); + CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups); + if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME); + if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? 
DEVNAME "1" : DEVNAME); + CHECK_RET(ithc_dma_tx_init, ithc); + + if (ithc_use_hid) CHECK_RET(ithc_hid_init, ithc); + else CHECK_RET(ithc_input_init, ithc); + + cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE); + timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0); + + // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc); + + if (ithc_use_polling) { + pci_info(pci, "using polling instead of irq\n"); + // use a thread instead of simple timer because we want to be able to sleep + ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll"); + if (IS_ERR(ithc->poll_thread)) { + int err = PTR_ERR(ithc->poll_thread); + ithc->poll_thread = NULL; + return err; + } + } else { + CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc); + } + + if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0); + if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1); + + // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA + if (ithc->hid) CHECK_RET(hid_add_device, ithc->hid); + + CHECK(ithc_debug_init, ithc); + + pci_dbg(pci, "started\n"); + return 0; +} + +static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) { + pci_dbg(pci, "device probe\n"); + return ithc_start(pci); +} + +static void ithc_remove(struct pci_dev *pci) { + pci_dbg(pci, "device remove\n"); + // all cleanup is handled by devres +} + +static int ithc_suspend(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); + pci_dbg(pci, "pm suspend\n"); + devres_release_group(dev, ithc_start); + return 0; +} + +static int ithc_resume(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); + pci_dbg(pci, "pm resume\n"); + return ithc_start(pci); +} + +static int ithc_freeze(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); + pci_dbg(pci, "pm freeze\n"); + devres_release_group(dev, ithc_start); + return 0; +} + +static int ithc_thaw(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); + pci_dbg(pci, "pm thaw\n"); + return ithc_start(pci); +} + +static int ithc_restore(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); + pci_dbg(pci, "pm restore\n"); + return ithc_start(pci); +} + +static struct pci_driver ithc_driver = { + .name = DEVNAME, + .id_table = ithc_pci_tbl, + .probe = ithc_probe, + .remove = ithc_remove, + .driver.pm = &(const struct dev_pm_ops) { + .suspend = ithc_suspend, + .resume = ithc_resume, + .freeze = ithc_freeze, + .thaw = ithc_thaw, + .restore = ithc_restore, + }, + //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway +}; + +static int __init ithc_init(void) { + return pci_register_driver(&ithc_driver); +} + +static void __exit ithc_exit(void) { + pci_unregister_driver(&ithc_driver); +} + +module_init(ithc_init); +module_exit(ithc_exit); + diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c new file mode 100644 index 0000000000000..18e58dd6ad191 --- /dev/null +++ b/drivers/hid/ithc/ithc-regs.c @@ -0,0 +1,64 @@ +#include "ithc.h" + +#define reg_num(r) (0x1fff & (u16)(u64)(r)) + +void bitsl(u32 *reg, u32 mask, u32 val) { + if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", 
reg_num(reg), val, mask); + writel((readl(reg) & ~mask) | (val & mask), reg); +} + +void bitsb(u8 *reg, u8 mask, u8 val) { + if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask); + writeb((readb(reg) & ~mask) | (val & mask), reg); +} + +int waitl(struct ithc *ithc, u32 *reg, u32 mask, u32 val) { + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val); + u32 x; + if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val); + return -ETIMEDOUT; + } + pci_dbg(ithc->pci, "done waiting\n"); + return 0; +} + +int waitb(struct ithc *ithc, u8 *reg, u8 mask, u8 val) { + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val); + u8 x; + if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val); + return -ETIMEDOUT; + } + pci_dbg(ithc->pci, "done waiting\n"); + return 0; +} + +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) { + pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode); + if (mode == 3) mode = 2; + bitsl(&ithc->regs->spi_config, + SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff), + SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed)); + return 0; +} + +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) { + pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset); + if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL; + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0); + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); + writeb(command, &ithc->regs->spi_cmd.code); + writew(size, &ithc->regs->spi_cmd.size); + writel(offset, &ithc->regs->spi_cmd.offset); + u32 *p = data, n = (size + 3) / 4; + for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]); + bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND); + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0); + if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO; + if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE; + for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]); + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); + return 0; +} + diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h new file mode 100644 index 0000000000000..90e8b702f1d15 --- /dev/null +++ b/drivers/hid/ithc/ithc-regs.h @@ -0,0 +1,186 @@ +#define CONTROL_QUIESCE BIT(1) +#define CONTROL_IS_QUIESCED BIT(2) +#define CONTROL_NRESET BIT(3) +#define CONTROL_READY BIT(29) + +#define SPI_CONFIG_MODE(x) (((x) & 3) << 2) +#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4) +#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18) +#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode? 
+ +#define ERROR_CONTROL_UNKNOWN_0 BIT(0) +#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs +#define ERROR_CONTROL_UNKNOWN_2 BIT(2) +#define ERROR_CONTROL_UNKNOWN_3 BIT(3) +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9) +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10) +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12) +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13) +#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq? +#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs + +#define ERROR_STATUS_DMA BIT(28) +#define ERROR_STATUS_SPI BIT(30) + +#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9) +#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10) +#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message +#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13) +#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16) +#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17) +#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18) +#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19) +#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20) +#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21) + +#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete +#define SPI_CMD_CONTROL_IRQ BIT(1) + +#define SPI_CMD_CODE_READ 4 +#define SPI_CMD_CODE_WRITE 6 + +#define SPI_CMD_STATUS_DONE BIT(0) +#define SPI_CMD_STATUS_ERROR BIT(1) +#define SPI_CMD_STATUS_BUSY BIT(3) + +#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete +#define DMA_TX_CONTROL_IRQ BIT(3) + +#define DMA_TX_STATUS_DONE BIT(0) +#define DMA_TX_STATUS_ERROR BIT(1) +#define DMA_TX_STATUS_UNKNOWN_2 BIT(2) +#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy? + +#define DMA_RX_CONTROL_ENABLE BIT(0) +#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only? +#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only? +#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only? +#define DMA_RX_CONTROL_IRQ_DATA BIT(5) + +#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only? 
+#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices + +#define DMA_RX_WRAP_FLAG BIT(7) + +#define DMA_RX_STATUS_ERROR BIT(3) +#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms) +#define DMA_RX_STATUS_HAVE_DATA BIT(5) +#define DMA_RX_STATUS_ENABLED BIT(8) + +#define COUNTER_RESET BIT(31) + +struct ithc_registers { + /* 0000 */ u32 _unknown_0000[1024]; + /* 1000 */ u32 _unknown_1000; + /* 1004 */ u32 _unknown_1004; + /* 1008 */ u32 control_bits; + /* 100c */ u32 _unknown_100c; + /* 1010 */ u32 spi_config; + /* 1014 */ u32 _unknown_1014[3]; + /* 1020 */ u32 error_control; + /* 1024 */ u32 error_status; // write to clear + /* 1028 */ u32 error_flags; // write to clear + /* 102c */ u32 _unknown_102c[5]; + struct { + /* 1040 */ u8 control; + /* 1041 */ u8 code; + /* 1042 */ u16 size; + /* 1044 */ u32 status; // write to clear + /* 1048 */ u32 offset; + /* 104c */ u32 data[16]; + /* 108c */ u32 _unknown_108c; + } spi_cmd; + struct { + /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() + /* 1098 */ u8 control; + /* 1099 */ u8 _unknown_1099; + /* 109a */ u8 _unknown_109a; + /* 109b */ u8 num_prds; + /* 109c */ u32 status; // write to clear + } dma_tx; + /* 10a0 */ u32 _unknown_10a0[7]; + /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET + /* 10c0 */ u32 _unknown_10c0[8]; + /* 10e0 */ u32 _unknown_10e0_counters[3]; + /* 10ec */ u32 _unknown_10ec[5]; + struct { + /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() + /* 1108/1208 */ u8 num_bufs; + /* 1109/1209 */ u8 num_prds; + /* 110a/120a */ u16 _unknown_110a; + /* 110c/120c */ u8 control; + /* 110d/120d */ u8 head; + /* 110e/120e */ u8 tail; + /* 110f/120f */ u8 control2; + /* 1110/1210 */ u32 status; // write to clear + /* 1114/1214 */ u32 _unknown_1114; + /* 1118/1218 */ u64 _unknown_1118_guc_addr; + /* 1120/1220 */ u32 _unknown_1120_guc; + /* 1124/1224 */ u32 _unknown_1124_guc; + /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related + /* 112c/122c */ u32 _unknown_112c; + /* 1130/1230 */ u64 _unknown_1130_guc_addr; + /* 1138/1238 */ u32 _unknown_1138_guc; + /* 113c/123c */ u32 _unknown_113c; + /* 1140/1240 */ u32 _unknown_1140_guc; + /* 1144/1244 */ u32 _unknown_1144[23]; + /* 11a0/12a0 */ u32 _unknown_11a0_counters[6]; + /* 11b8/12b8 */ u32 _unknown_11b8[18]; + } dma_rx[2]; +}; +_Static_assert(sizeof(struct ithc_registers) == 0x1300); + +#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6) +#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6) + +#define DEVCFG_TOUCH_MASK 0x3f +#define DEVCFG_TOUCH_ENABLE BIT(0) +#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1) +#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2) +#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3) +#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4) +#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5) +#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6) + +#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC" + +#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode? 
+#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
+#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
+#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
+#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
+#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
+#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
+#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
+#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7)
+#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
+
+struct ithc_device_config {
+	u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
+	u32 _unknown_04; // 04 = 0x00000000
+	u32 dma_buf_sizes; // 08 = 0x000a00ff
+	u32 touch_cfg; // 0c = 0x0000001c
+	u32 _unknown_10; // 10 = 0x0000001c
+	u32 device_id; // 14 = 0x43495424 = "$TIC"
+	u32 spi_config; // 18 = 0xfda00a2e
+	u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
+	u16 product_id; // 1e = 0x0c1a
+	u32 revision; // 20 = 0x00000001
+	u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139
+	u32 _unknown_28; // 28 = 0x00000000
+	u32 fw_mode; // 2c = 0x00000000
+	u32 _unknown_30; // 30 = 0x00000000
+	u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
+	u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
+	u32 _unknown_3c; // 3c = 0x00000002
+};
+
+void bitsl(u32 *reg, u32 mask, u32 val);
+void bitsb(u8 *reg, u8 mask, u8 val);
+#define bitsl_set(reg, x) bitsl(reg, x, x)
+#define bitsb_set(reg, x) bitsb(reg, x, x)
+int waitl(struct ithc *ithc, u32 *reg, u32 mask, u32 val);
+int waitb(struct ithc *ithc, u8 *reg, u8 mask, u8 val);
+int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
+int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
+
diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
new file mode 100644
index 0000000000000..da38f4be5ef18
--- /dev/null
+++ b/drivers/hid/ithc/ithc.h
@@ -0,0 +1,68 @@
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/pm_qos.h>
+
+#define DEVNAME "ithc"
+#define DEVFULLNAME "Intel Touch Host Controller"
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
+#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
+
+// We allocate NUM_RX_ALLOC buffers, and take a sliding window of NUM_RX_DEV buffers from that to populate the device ringbuffer.
+// The remaining buffers outside the window contain old data that we keep around for sending to userspace.
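+// For example (illustration of the indexing used in ithc-dma.c and ithc-api.c): with NUM_RX_ALLOC = 32
+// and NUM_RX_DEV = 16, after num_received = 40 messages, bufs[8..23] are mapped into the device
+// ringbuffer waiting to be filled, while bufs[24..31] and bufs[0..7] hold the 16 most recently received
+// messages (oldest in buf 24, newest in buf 7) that ithc_api_read() can still copy to userspace.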
+#define NUM_RX_ALLOC 32 +#define NUM_RX_DEV 16 +_Static_assert(NUM_RX_ALLOC % NUM_RX_DEV == 0, "NUM_RX_ALLOC must be a multiple of NUM_RX_DEV"); + +struct ithc; +struct ithc_api; + +#include "ithc-regs.h" +#include "ithc-dma.h" +#include "ithc-api.h" + +struct ithc { + char phys[32]; + struct pci_dev *pci; + int irq; + struct task_struct *poll_thread; + struct pm_qos_request activity_qos; + struct timer_list activity_timer; + + struct input_dev *input; + + struct hid_device *hid; + bool hid_parse_done; + wait_queue_head_t wait_hid_parse; + wait_queue_head_t wait_hid_get_feature; + struct mutex hid_get_feature_mutex; + void *hid_get_feature_buf; + size_t hid_get_feature_size; + + struct ithc_registers *regs; + struct ithc_registers *prev_regs; // for debugging + struct ithc_device_config config; + struct ithc_dma_rx dma_rx[2]; + struct ithc_dma_tx dma_tx; +}; + +int ithc_reset(struct ithc *ithc); +void ithc_set_active(struct ithc *ithc); +int ithc_debug_init(struct ithc *ithc); +void ithc_log_regs(struct ithc *ithc); +
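For illustration, a minimal userspace consumer of the /dev/ithc character device described in ithc-api.c could look roughly like the sketch below (the header layout is copied from ithc-api.h; ithc-dump.c earlier in this patch is the complete debugging utility):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

struct ithc_api_header { uint8_t hdr_size; uint8_t reserved[3]; uint32_t msg_num; uint32_t size; };

int main(void) {
	// opening the device automatically enables multitouch mode; closing it disables it again
	int fd = open("/dev/ithc", O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }
	uint8_t buf[0x10000]; // must fit at least one whole message, otherwise read() fails with EMSGSIZE
	for (;;) {
		ssize_t n = read(fd, buf, sizeof buf); // blocks until data; returns only complete messages
		if (n < 0) { perror("read"); return 1; }
		for (ssize_t off = 0; off + (ssize_t)sizeof(struct ithc_api_header) <= n; ) {
			struct ithc_api_header *h = (void *)(buf + off);
			printf("msg %u: %u bytes of data\n", h->msg_num, h->size);
			off += h->hdr_size + h->size;
		}
	}
}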