hid: Add support for Intel Touch Host Controller
Based on quo/ithc-linux@55803a2

Signed-off-by: Dorian Stoll <[email protected]>
Patchset: ithc
StollD committed Jan 24, 2023
1 parent e83b07c commit 4b37b1c
Showing 11 changed files with 1,286 additions and 0 deletions.
2 changes: 2 additions & 0 deletions drivers/hid/Kconfig
@@ -1292,4 +1292,6 @@ source "drivers/hid/surface-hid/Kconfig"

source "drivers/hid/ipts/Kconfig"

source "drivers/hid/ithc/Kconfig"

endmenu
1 change: 1 addition & 0 deletions drivers/hid/Makefile
@@ -166,3 +166,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/

obj-$(CONFIG_HID_IPTS) += ipts/
obj-$(CONFIG_HID_ITHC) += ithc/
6 changes: 6 additions & 0 deletions drivers/hid/ithc/Kbuild
@@ -0,0 +1,6 @@
obj-$(CONFIG_HID_ITHC) := ithc.o

ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o

ccflags-y := -std=gnu11 -Wno-declaration-after-statement

12 changes: 12 additions & 0 deletions drivers/hid/ithc/Kconfig
@@ -0,0 +1,12 @@
config HID_ITHC
tristate "Intel Touch Host Controller"
depends on PCI
depends on HID
help
Say Y here if your system has a touchscreen using Intel's
Touch Host Controller (ITHC / IPTS) technology.

If unsure, say N.

To compile this driver as a module, choose M here: the
module will be called ithc.
96 changes: 96 additions & 0 deletions drivers/hid/ithc/ithc-debug.c
@@ -0,0 +1,96 @@
#include "ithc.h"

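// Compare the live MMIO registers against the snapshot in ithc->prev_regs (a no-op if no snapshot buffer was allocated), log every word that changed, and update the snapshot.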
void ithc_log_regs(struct ithc *ithc) {
if (!ithc->prev_regs) return;
u32 __iomem *cur = (__iomem void*)ithc->regs;
u32 *prev = (void*)ithc->prev_regs;
for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
u32 x = readl(cur + i);
if (x != prev[i]) {
pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
prev[i] = x;
}
}
}

static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
struct ithc *ithc = file_inode(f)->i_private;
char cmd[256];
if (!ithc || !ithc->pci) return -ENODEV;
if (!len) return -EINVAL;
if (len >= sizeof cmd) return -EINVAL;
if (copy_from_user(cmd, buf, len)) return -EFAULT;
cmd[len] = 0;
if (cmd[len-1] == '\n') cmd[len-1] = 0;
pci_info(ithc->pci, "debug command: %s\n", cmd);
u32 n = 0;
const char *s = cmd + 1;
u32 a[32];
while (*s && *s != '\n') {
if (n >= ARRAY_SIZE(a)) return -EINVAL;
if (*s++ != ' ') return -EINVAL;
char *e;
a[n++] = simple_strtoul(s, &e, 0);
if (e == s) return -EINVAL;
s = e;
}
ithc_log_regs(ithc);
switch(cmd[0]) {
case 'x': // reset
ithc_reset(ithc);
break;
case 'w': // write register: offset mask value
if (n != 3 || (a[0] & 3)) return -EINVAL;
pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
break;
case 'r': // read register: offset
if (n != 1 || (a[0] & 3)) return -EINVAL;
pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
break;
case 's': // spi command: cmd offset len data...
// read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// set touch cfg: s 6 12 4 XX
if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL;
pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
break;
case 'd': // dma command: cmd len data...
// get report descriptor: d 7 8 0 0
// enable multitouch: d 3 2 0x0105
if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL;
pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n");
break;
default:
return -EINVAL;
}
ithc_log_regs(ithc);
return len;
}

static const struct file_operations ithc_debugfops_cmd = {
.owner = THIS_MODULE,
.write = ithc_debugfs_cmd_write,
};

static void ithc_debugfs_devres_release(struct device *dev, void *res) {
struct dentry **dbgm = res;
if (*dbgm) debugfs_remove_recursive(*dbgm);
}

int ithc_debug_init(struct ithc *ithc) {
struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL);
if (!dbgm) return -ENOMEM;
devres_add(&ithc->pci->dev, dbgm);
struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
if (IS_ERR(dbg)) return PTR_ERR(dbg);
*dbgm = dbg;

struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
if (IS_ERR(cmd)) return PTR_ERR(cmd);

return 0;
}

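The cmd file registered above accepts the single-letter commands parsed in ithc_debugfs_cmd_write(). A minimal user-space sketch of driving it, assuming debugfs is mounted at /sys/kernel/debug and DEVNAME expands to "ithc" (the path is otherwise hypothetical), reading the register at offset 0x20:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void) {
	// same syntax ithc_debugfs_cmd_write() parses: 'r' plus a word-aligned register offset
	const char *cmd = "r 0x20\n";
	int fd = open("/sys/kernel/debug/ithc/cmd", O_WRONLY);
	if (fd < 0) { perror("open"); return 1; }
	if (write(fd, cmd, strlen(cmd)) < 0) perror("write");
	close(fd);
	return 0; // the register value is reported through the kernel log via pci_info()
}
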
258 changes: 258 additions & 0 deletions drivers/hid/ithc/ithc-dma.c
@@ -0,0 +1,258 @@
#include "ithc.h"

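// Allocate the coherent, page-aligned table of physical region descriptors (PRDs, struct ithc_phys_region_desc) through which the device is told where the data buffers live: num_buffers buffers with num_pages descriptors each.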
static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) {
p->num_pages = num_pages;
p->dir = dir;
p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
if (!p->addr) return -ENOMEM;
if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT;
return 0;
}

struct ithc_sg_table {
void *addr;
struct sg_table sgt;
enum dma_data_direction dir;
};
static void ithc_dma_sgtable_free(struct sg_table *sgt) {
struct scatterlist *sg;
int i;
for_each_sgtable_sg(sgt, sg, i) {
struct page *p = sg_page(sg);
if (p) __free_page(p);
}
sg_free_table(sgt);
}
static void ithc_dma_data_devres_release(struct device *dev, void *res) {
struct ithc_sg_table *sgt = res;
if (sgt->addr) vunmap(sgt->addr);
dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
ithc_dma_sgtable_free(&sgt->sgt);
}

static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
// We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional).
// Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now).
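// On kernels >= 5.13 roughly the same allocation could be written with the noncontiguous DMA API instead, e.g. (untested sketch):
//   struct sg_table *sgt = dma_alloc_noncontiguous(&ithc->pci->dev, prds->num_pages * PAGE_SIZE, prds->dir, GFP_KERNEL, 0);
//   void *addr = dma_vmap_noncontiguous(&ithc->pci->dev, prds->num_pages * PAGE_SIZE, sgt);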
struct page *pages[16];
if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL;
b->active_idx = -1;
struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL);
if (!sgt) return -ENOMEM;
sgt->dir = prds->dir;
if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
struct scatterlist *sg;
int i;
bool ok = true;
for_each_sgtable_sg(&sgt->sgt, sg, i) {
struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA
if (!p) { ok = false; break; }
sg_set_page(sg, p, PAGE_SIZE, 0);
}
if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
devres_add(&ithc->pci->dev, sgt);
b->sgt = &sgt->sgt;
b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
if (!b->addr) return -ENOMEM;
return 0;
}
ithc_dma_sgtable_free(&sgt->sgt);
}
devres_free(sgt);
return -ENOMEM;
}

static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
struct ithc_phys_region_desc *prd = prds->addr;
prd += idx * prds->num_pages;
if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; }
b->active_idx = idx;
if (prds->dir == DMA_TO_DEVICE) {
if (b->data_size > PAGE_SIZE) return -EINVAL;
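// the PRD address field apparently holds the bus address in 1024-byte units, hence the shift below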
prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
prd->size = b->data_size | PRD_FLAG_END;
flush_kernel_vmap_range(b->addr, b->data_size);
} else if (prds->dir == DMA_FROM_DEVICE) {
struct scatterlist *sg;
int i;
for_each_sgtable_dma_sg(b->sgt, sg, i) {
prd->addr = sg_dma_address(sg) >> 10;
prd->size = sg_dma_len(sg);
prd++;
}
prd[-1].size |= PRD_FLAG_END;
}
dma_wmb(); // for the prds
dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir);
return 0;
}

static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
struct ithc_phys_region_desc *prd = prds->addr;
prd += idx * prds->num_pages;
if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; }
b->active_idx = -1;
if (prds->dir == DMA_FROM_DEVICE) {
dma_rmb(); // for the prds
b->data_size = 0;
struct scatterlist *sg;
int i;
for_each_sgtable_dma_sg(b->sgt, sg, i) {
unsigned size = prd->size;
b->data_size += size & PRD_SIZE_MASK;
if (size & PRD_FLAG_END) break;
if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; }
prd++;
}
invalidate_kernel_vmap_range(b->addr, b->data_size);
}
dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir);
return 0;
}

int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) {
struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
mutex_init(&rx->mutex);
u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages);
CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
for (unsigned i = 0; i < NUM_RX_BUF; i++)
CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
u8 head = readb(&ithc->regs->dma_rx[channel].head);
if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; }
for (unsigned i = 0; i < NUM_RX_BUF; i++)
CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
return 0;
}
void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) {
bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
}

int ithc_dma_tx_init(struct ithc *ithc) {
struct ithc_dma_tx *tx = &ithc->dma_tx;
mutex_init(&tx->mutex);
tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages);
CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
return 0;
}

static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) {
if (buf >= NUM_RX_BUF) {
pci_err(ithc->pci, "invalid dma ringbuffer index\n");
return -EINVAL;
}
ithc_set_active(ithc);
u32 len = data->data_size;
struct ithc_dma_rx_header *hdr = data->addr;
u8 *hiddata = (void *)(hdr + 1);
if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) {
CHECK(ithc_reset, ithc);
} else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) {
if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
// When the CPU enters a low power state during DMA, we can get truncated messages.
// Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes.
// See also ithc_set_active().
} else {
pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size);
print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
}
} else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
WRITE_ONCE(ithc->hid_parse_done, true);
wake_up(&ithc->wait_hid_parse);
} else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
} else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
bool done = false;
mutex_lock(&ithc->hid_get_feature_mutex);
if (ithc->hid_get_feature_buf) {
if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size;
memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
ithc->hid_get_feature_buf = NULL;
done = true;
}
mutex_unlock(&ithc->hid_get_feature_mutex);
if (done) wake_up(&ithc->wait_hid_get_feature);
else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1);
} else {
pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code);
print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
}
return 0;
}

static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
unsigned n = rx->num_received;
u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
while (1) {
u8 tail = n % NUM_RX_BUF;
u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG);
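// ((n / NUM_RX_BUF) & 1) alternates on every lap around the ring, so the wrap flag distinguishes the full and empty cases below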
writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
// ringbuffer is full if tail_wrap == head_wrap
// ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0;

// take the buffer that the device just filled
struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail);
rx->num_received = ++n;

// process data
CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);

// give the buffer back to the device
CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
}
}
int ithc_dma_rx(struct ithc *ithc, u8 channel) {
struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
mutex_lock(&rx->mutex);
int ret = ithc_dma_rx_unlocked(ithc, channel);
mutex_unlock(&rx->mutex);
return ret;
}

static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
struct ithc_dma_tx_header *hdr;
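// pad the payload so the total transfer size stays a multiple of 4 bytes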
u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
unsigned fullsize = sizeof *hdr + datasize + padding;
if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL;
CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);

ithc->dma_tx.buf.data_size = fullsize;
hdr = ithc->dma_tx.buf.addr;
hdr->code = cmdcode;
hdr->data_size = datasize;
u8 *dest = (void *)(hdr + 1);
memcpy(dest, data, datasize);
dest += datasize;
for (u8 p = 0; p < padding; p++) *dest++ = 0;
CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);

bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
return 0;
}
int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
mutex_lock(&ithc->dma_tx.mutex);
int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
mutex_unlock(&ithc->dma_tx.mutex);
return ret;
}
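
As a worked example of the tx path, a hedged sketch of sending the "enable multitouch" command documented in ithc-debug.c (DMA command code 3 with a 2-byte payload of 0x0105); the wrapper name is hypothetical and not part of this patch:

static int ithc_example_enable_multitouch(struct ithc *ithc) {
	// command code and payload taken from the "enable multitouch: d 3 2 0x0105" debug comment
	u16 mode = 0x0105;
	return ithc_dma_tx(ithc, 3, sizeof(mode), &mode);
}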

(Diffs for the remaining five files are not shown.)