https://www.cnblogs.com/newjiang/p/15675746.html
If dr_mode is device, the gadget is initialized.
If dr_mode is host, the xHCI driver has to be brought up: at the end of dwc3_host_init, platform_device_add(xhci) adds a platform device named "xhci-hcd", which is then matched by the xHCI driver (itself a platform driver); see section 3.
If dr_mode is otg, a role (host or device) has to be picked via extcon before initialization, so the extcon driver is also required; see section 2. The dispatch across these three cases is sketched below.
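Hedged sketch: the helper below is made up for illustration, but usb_get_dr_mode() is the standard kernel helper that parses the dr_mode property.
#include <linux/platform_device.h>
#include <linux/usb/otg.h>	/* enum usb_dr_mode, usb_get_dr_mode() */
/* Hypothetical helper: pick an init path from the DT "dr_mode" property. */
static int example_init_by_dr_mode(struct platform_device *pdev)
{
	enum usb_dr_mode mode = usb_get_dr_mode(&pdev->dev);
	switch (mode) {
	case USB_DR_MODE_PERIPHERAL:	/* dr_mode = "peripheral" */
		return 0;	/* gadget-only init would go here */
	case USB_DR_MODE_HOST:		/* dr_mode = "host" */
		return 0;	/* xHCI host init (see dwc3_host_init below) */
	case USB_DR_MODE_OTG:		/* dr_mode = "otg" */
		return 0;	/* dual-role init, role picked via extcon */
	default:
		return -EINVAL;
	}
}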
https://blog.csdn.net/z1026544682/article/details/101023041
The reg property (address) of a device tree node is retrieved as a resource of type IORESOURCE_MEM;
the interrupts property (interrupt) is retrieved as a resource of type IORESOURCE_IRQ.
How do we read a non-standard property, e.g. pin (pin = <xxx>), from a device tree node that has been turned into a platform_device?
The root node "/ { };" is kept in the global variable of_root (a struct device_node). Starting from this root node any node can be reached, and once you have the node you can read its properties.
So the kernel provides helper functions that work directly on a device_node to read these properties.
They fall into three groups: find a node, find a property, and read a property's value. They are declared in include/linux/of.h; a usage sketch follows.
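A hedged sketch (the probe function and the "pin" property name are hypothetical): the standard reg/interrupts properties arrive as resources built by the OF core, while a non-standard property is read straight from the node.
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/* Hypothetical probe showing the three kinds of access described above. */
static int example_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *mem;
	int irq;
	u32 pin;
	/* reg = <...>;        -> IORESOURCE_MEM resource */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;
	/* interrupts = <...>; -> IORESOURCE_IRQ (mapped virq number) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	/* non-standard property, e.g. pin = <5>; ("pin" is a made-up name) */
	if (of_property_read_u32(np, "pin", &pin))
		dev_warn(&pdev->dev, "no \"pin\" property\n");
	return 0;
}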
/* Tertiary USB port related controller */
usb2: ssusb@a400000 {
compatible = "qcom,dwc-usb3-msm";
reg = <0xa400000 0x100000>;
reg-names = "core_base";
iommus = <&apps_smmu 0x0800 0x0>;
qcom,iommu-dma = "bypass";
#address-cells = <1>;
#size-cells = <1>;
ranges;
dma-ranges;
interrupts-extended = <&pdc 127 IRQ_TYPE_EDGE_RISING>,
<&pdc 126 IRQ_TYPE_EDGE_RISING>,
<&pdc 129 IRQ_TYPE_EDGE_RISING>,
<&pdc 128 IRQ_TYPE_EDGE_RISING>,
<&pdc 131 IRQ_TYPE_EDGE_RISING>,
<&pdc 130 IRQ_TYPE_EDGE_RISING>,
<&pdc 133 IRQ_TYPE_EDGE_RISING>,
<&pdc 132 IRQ_TYPE_EDGE_RISING>,
<&pdc 16 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "dp_hs_phy_irq", "dm_hs_phy_irq",
"dp_hs_phy_irq1", "dm_hs_phy_irq1",
"dp_hs_phy_irq2", "dm_hs_phy_irq2",
"dp_hs_phy_irq3", "dm_hs_phy_irq3",
"ss_phy_irq", "ss_phy_irq1";
qcom,use-pdc-interrupts;
USB3_GDSC-supply = <&gcc_usb30_mp_gdsc>;
clocks = <&gcc GCC_USB30_MP_MASTER_CLK>,
<&gcc GCC_CFG_NOC_USB3_MP_AXI_CLK>,
<&gcc GCC_AGGRE_USB3_MP_AXI_CLK>,
<&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MP_SLEEP_CLK>,
<&gcc GCC_AGGRE_USB_NOC_AXI_CLK>,
<&gcc GCC_AGGRE_USB_NOC_NORTH_AXI_CLK>,
<&gcc GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK>,
<&gcc GCC_SYS_NOC_USB_AXI_CLK>;
clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
"utmi_clk", "sleep_clk", "noc_aggr_clk",
"noc_aggr_north_clk", "noc_aggr_south_clk",
"noc_sys_clk";
resets = <&gcc GCC_USB30_MP_BCR>;
reset-names = "core_reset";
qcom,core-clk-rate = <200000000>;
qcom,ignore-wakeup-src-in-hostmode;
status = "disabled";
dwc3@a400000 {
compatible = "snps,dwc3";
reg = <0xa400000 0xd93c>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
usb-phy = <&usb2_phy2>, <&usb_qmp_phy0>,
<&usb2_phy3>, <&usb_qmp_phy1>,
<&usb2_phy4>, <&usb_nop_phy>,
<&usb2_phy5>, <&usb_nop_phy>;
linux,sysdev_is_parent;
snps,disable-clk-gating;
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,ssp-u3-u0-quirk;
snps,is-utmi-l1-suspend;
snps,dis_u3_susphy_quirk;
maximum-speed = "super-speed-plus";
dr_mode = "host";
};
};
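The parent ssusb node (compatible = "qcom,dwc-usb3-msm") is probed by the Qualcomm glue driver; the child dwc3@a400000 node only becomes a platform_device once the glue driver populates its children, roughly as in this hedged sketch (not the exact vendor code):
#include <linux/of_platform.h>
#include <linux/platform_device.h>
/* Sketch of the glue driver's probe(): turn the child dwc3@a400000 node
 * into a platform_device so the core dwc3 driver can bind to it. */
static int example_glue_add_dwc3_core(struct platform_device *pdev)
{
	int ret;
	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		dev_err(&pdev->dev, "failed to add dwc3 core: %d\n", ret);
	return ret;
}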
dwc3_probe is matched via compatible = "snps,dwc3";
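The matching side is the dwc3 core's platform_driver (abridged from drivers/usb/dwc3/core.c; the exact field list varies between kernel versions):
static const struct of_device_id of_dwc3_match[] = {
	{ .compatible = "snps,dwc3" },
	{ .compatible = "synopsys,dwc3" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
static struct platform_driver dwc3_driver = {
	.probe	= dwc3_probe,
	.remove	= dwc3_remove,
	.driver	= {
		.name		= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
	},
};
module_platform_driver(dwc3_driver);
With that in place, dwc3_probe runs as follows: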
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res, dwc_res;
struct dwc3 *dwc;
int ret;
void __iomem *regs;
int irq;
char dma_ipc_log_ctx_name[40];
if (count >= DWC_CTRL_COUNT) {
dev_err(dev, "Err dwc instance %d >= %d available\n",
count, DWC_CTRL_COUNT);
ret = -EINVAL;
return ret;
}
dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
if (!dwc)
return -ENOMEM;
dwc->dev = dev;
// parse the MEM resource: reg = <0xa400000 0xd93c>;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "missing memory resource\n");
return -ENODEV;
}
dwc->reg_phys = res->start;//0xa400000
dwc->xhci_resources[0].start = res->start;//0xa400000
dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
DWC3_XHCI_REGS_END;
dwc->xhci_resources[0].flags = res->flags;
dwc->xhci_resources[0].name = res->name;
//irq = 133
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
dwc);
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
return -ENODEV;
}
if (notify_event)
/* will be enabled in dwc3_msm_resume() */
disable_irq(irq);
dwc->irq = irq;
/*
* Request memory region but exclude xHCI regs,
* since it will be requested by the xhci-plat driver.
*/
dwc_res = *res;
dwc_res.start += DWC3_GLOBALS_REGS_START;
// ioremap the register region to get its virtual address
regs = devm_ioremap_resource(dev, &dwc_res);
if (IS_ERR(regs))
return PTR_ERR(regs);
dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
if (!dwc->dwc_wq) {
dev_err(dev,
"%s: Unable to create workqueue dwc_wq\n", __func__);
goto err0;
}
INIT_WORK(&dwc->bh_work, dwc3_bh_work);
dwc->regs = regs;
dwc->regs_size = resource_size(&dwc_res);
dwc3_get_properties(dwc);
dwc->reset = devm_reset_control_array_get(dev, true, true);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
if (dev->of_node) {
ret = devm_clk_bulk_get_all(dev, &dwc->clks);
if (ret == -EPROBE_DEFER)
goto err0;
/*
* Clocks are optional, but new DT platforms should support all
* clocks as required by the DT-binding.
*/
if (ret < 0)
dwc->num_clks = 0;
else
dwc->num_clks = ret;
}
ret = dwc3_extract_num_phys(dwc);
if (ret) {
dev_err(dwc->dev, "Unable to extract number of PHYs\n");
goto err0;
}
dwc->usb2_phy = devm_kzalloc(dwc->dev,
sizeof(*dwc->usb2_phy) * dwc->num_hsphy, GFP_KERNEL);
dwc->usb3_phy = devm_kzalloc(dwc->dev,
sizeof(*dwc->usb3_phy) * dwc->num_ssphy, GFP_KERNEL);
ret = reset_control_deassert(dwc->reset);
if (ret)
goto err0;
ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks);
if (ret)
goto assert_reset;
platform_set_drvdata(pdev, dwc);
init_waitqueue_head(&dwc->wait_linkstate);
spin_lock_init(&dwc->lock);
pm_runtime_no_callbacks(dev);
pm_runtime_set_active(dev);
if (dwc->enable_bus_suspend) {
pm_runtime_set_autosuspend_delay(dev,
DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
}
pm_runtime_enable(dev);
pm_runtime_forbid(dev);
ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
goto err1;
}
ret = dwc3_alloc_scratch_buffers(dwc);
if (ret)
goto err2;
dwc3_debugfs_init(dwc);
if (!notify_event) {
ret = dwc3_core_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to initialize core: %d\n",
ret);
goto err3;
}
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
goto err3;
}
ret = dwc3_core_init_mode(dwc);
if (ret) {
dwc3_event_buffers_cleanup(dwc);
goto err3;
}
} else if (dwc->dr_mode == USB_DR_MODE_OTG ||
dwc->dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = dwc3_gadget_init(dwc);
if (ret) {
dev_err(dwc->dev, "gadget init failed %d\n", ret);
goto err3;
}
}
dwc->dwc_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES,
dev_name(dwc->dev), 0);
if (!dwc->dwc_ipc_log_ctxt)
dev_dbg(dwc->dev, "ipc_log_ctxt is not available\n");
snprintf(dma_ipc_log_ctx_name, sizeof(dma_ipc_log_ctx_name),
"%s.ep_events", dev_name(dwc->dev));
dwc->dwc_dma_ipc_log_ctxt = ipc_log_context_create(2 * NUM_LOG_PAGES,
dma_ipc_log_ctx_name, 0);
if (!dwc->dwc_dma_ipc_log_ctxt)
dev_dbg(dwc->dev, "ipc_log_ctxt for ep_events is not available\n");
dwc3_instance[count] = dwc;
dwc->index = count;
count++;
pm_runtime_allow(dev);
return 0;
err3:
dwc3_debugfs_exit(dwc);
dwc3_free_scratch_buffers(dwc);
err2:
dwc3_free_event_buffers(dwc);
err1:
pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
reset_control_assert(dwc->reset);
destroy_workqueue(dwc->dwc_wq);
err0:
return ret;
}
dwc3_core_init_mode
static int __maybe_unused dwc3_core_init_mode(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
int ret;
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
if (dwc->usb2_phy[0])
otg_set_vbus(dwc->usb2_phy[0]->otg, false);
phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to initialize gadget\n");
return ret;
}
dwc->vbus_active = true;
break;
case USB_DR_MODE_HOST:
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
if (dwc->usb2_phy[0])
otg_set_vbus(dwc->usb2_phy[0]->otg, true);
phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to initialize host\n");
return ret;
}
break;
case USB_DR_MODE_OTG:
INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
ret = dwc3_drd_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to initialize dual-role\n");
return ret;
}
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
return -EINVAL;
}
return 0;
}
dwc3_host_init
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[6];
struct platform_device *xhci;
int ret, irq;
struct resource *res;
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
struct property_entry imod_prop;
// get the interrupt number (133)
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
if (!res)
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
"dwc_usb3");
if (!res)
// fall back to the first IRQ resource (from the interrupts property)
res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
if (!res)
return -ENOMEM;
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
dwc->xhci_resources[1].flags = res->flags;
dwc->xhci_resources[1].name = res->name;
// allocate and initialize a platform_device named "xhci-hcd"
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(dwc->dev, "couldn't allocate xHCI device\n");
return -ENOMEM;
}
xhci->dev.parent = dwc->dev;
// bind the xhci platform_device to dwc
dwc->xhci = xhci;
ret = platform_device_add_resources(xhci, dwc->xhci_resources,
DWC3_XHCI_RESOURCES_NUM);
if (ret) {
dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
goto err;
}
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
if (dwc->usb2_lpm_disable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
if (dwc->xhci_imod_value) {
imod_prop.name = "imod-interval-ns";
imod_prop.length = sizeof(u32);
imod_prop.is_array = false;
imod_prop.type = DEV_PROP_U32;
imod_prop.value.u32_data = dwc->xhci_imod_value;
props[prop_idx++] = imod_prop;
}
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
* where Port Disable command doesn't work.
*
* The suggested workaround is that we avoid Port Disable
* completely.
*
* This following flag tells XHCI to do just that.
*/
if (dwc->revision <= DWC3_REVISION_300A)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
ret = platform_device_add_properties(xhci, props);
if (ret) {
dev_err(dwc->dev, "failed to add properties to xHCI\n");
goto err;
}
}
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
goto err;
}
return 0;
err:
platform_device_put(xhci);
return ret;
}
EXPORT_SYMBOL(dwc3_host_init);
xhci_plat_probe: probed once the "xhci-hcd" platform device is matched
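That binding happens through the xHCI platform driver's name, "xhci-hcd" (abridged from drivers/usb/host/xhci-plat.c; fields vary by kernel version); xhci_init_driver() is also where .reset ends up pointing at xhci_plat_setup:
/* Abridged: the driver name "xhci-hcd" is what matches the platform
 * device created by dwc3_host_init(). */
static struct platform_driver usb_xhci_driver = {
	.probe	= xhci_plat_probe,
	.remove	= xhci_plat_remove,
	.driver	= {
		.name = "xhci-hcd",
		.pm = &xhci_plat_pm_ops,
		.of_match_table = of_match_ptr(usb_xhci_of_match),
	},
};
static int __init xhci_plat_init(void)
{
	/* Fills in xhci_plat_hc_driver, including .reset = xhci_plat_setup. */
	xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
	return platform_driver_register(&usb_xhci_driver);
}
module_init(xhci_plat_init);
xhci_plat_probe then does the actual host bring-up: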
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
const struct hc_driver *driver;
struct device *sysdev, *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
if (usb_disabled())
return -ENODEV;
driver = &xhci_plat_hc_driver;
// get the dwc3 interrupt number (133)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/*
* sysdev must point to a device that is known to the system firmware
* or PCI hardware. We handle these three cases here:
* 1. xhci_plat comes from firmware
* 2. xhci_plat is child of a device from firmware (dwc3-plat)
* 3. xhci_plat is grandchild of a pci device (dwc3-pci)
*/
for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
if (is_of_node(sysdev->fwnode) ||
is_acpi_device_node(sysdev->fwnode))
break;
#ifdef CONFIG_PCI
else if (sysdev->bus == &pci_bus_type)
break;
#endif
}
if (!sysdev)
sysdev = &pdev->dev;
/*
* If sysdev dev is having parent i.e. "linux,sysdev_is_parent" is true,
* then use sysdev->parent device.
*/
if (sysdev->parent && sysdev->parent->of_node &&
device_property_read_bool(sysdev, "linux,sysdev_is_parent"))
sysdev = sysdev->parent;
/* Try to set 64-bit DMA first */
if (WARN_ON(!sysdev->dma_mask))
/* Platform did not initialize dma_mask */
ret = dma_coerce_mask_and_coherent(sysdev,
DMA_BIT_MASK(64));
else
ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
/* If setting the 64-bit DMA mask fails, fall back to a 32-bit DMA mask */
if (ret) {
ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(32));
if (ret)
return ret;
}
// create the primary hcd with driver = &xhci_plat_hc_driver
hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), NULL);
if (!hcd)
return -ENOMEM;
// ioremap the MEM resource to get the register base: reg = <0xa400000 0xd93c>;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
xhci = hcd_to_xhci(hcd);
/*
* Not all platforms have clks so it is not an error if the
* clock do not exist.
*/
xhci->reg_clk = devm_clk_get_optional(&pdev->dev, "reg");
if (IS_ERR(xhci->reg_clk)) {
ret = PTR_ERR(xhci->reg_clk);
goto put_hcd;
}
ret = clk_prepare_enable(xhci->reg_clk);
if (ret)
goto put_hcd;
xhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(xhci->clk)) {
ret = PTR_ERR(xhci->clk);
goto disable_reg_clk;
}
ret = clk_prepare_enable(xhci->clk);
if (ret)
goto disable_reg_clk;
if (pdev->dev.parent)
pm_runtime_resume(pdev->dev.parent);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
priv_match = of_device_get_match_data(&pdev->dev);
if (priv_match) {
priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
if (priv_match)
*priv = *priv_match;
}
if (device_may_wakeup(sysdev))
device_init_wakeup(hcd->self.controller, 1);
xhci->main_hcd = hcd;
// create the shared (USB3) hcd
xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto disable_clk;
}
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
/* Iterate over all parent nodes for finding quirks */
for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
goto put_usb3_hcd;
hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(hcd->usb_phy);
if (ret)
goto put_usb3_hcd;
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
if (priv) {
ret = xhci_priv_plat_setup(hcd);
if (ret)
goto disable_usb_phy;
}
if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
hcd->skip_phy_initialization = 1;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto dealloc_usb2_hcd;
device_enable_async_suspend(&pdev->dev);
if (device_may_wakeup(sysdev)) {
device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
device_wakeup_enable(&hcd->self.root_hub->dev);
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
disable_usb_phy:
usb_phy_shutdown(hcd->usb_phy);
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
disable_clk:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(xhci->clk);
disable_reg_clk:
clk_disable_unprepare(xhci->reg_clk);
put_hcd:
usb_put_hcd(hcd);
return ret;
}
__usb_create_hcd
struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
struct device *sysdev, struct device *dev, const char *bus_name,
struct usb_hcd *primary_hcd)
{
struct usb_hcd *hcd;
hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
if (!hcd)
return NULL;
if (primary_hcd == NULL) {
hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
GFP_KERNEL);
if (!hcd->address0_mutex) {
kfree(hcd);
dev_dbg(dev, "hcd address0 mutex alloc failed\n");
return NULL;
}
mutex_init(hcd->address0_mutex);
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
kfree(hcd->address0_mutex);
kfree(hcd);
dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
return NULL;
}
mutex_init(hcd->bandwidth_mutex);
dev_set_drvdata(dev, hcd);
} else {
mutex_lock(&usb_port_peer_mutex);
hcd->address0_mutex = primary_hcd->address0_mutex;
hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
hcd->primary_hcd = primary_hcd;
primary_hcd->primary_hcd = primary_hcd;
hcd->shared_hcd = primary_hcd;
primary_hcd->shared_hcd = hcd;
mutex_unlock(&usb_port_peer_mutex);
}
kref_init(&hcd->kref);
usb_bus_init(&hcd->self);
hcd->self.controller = dev;
hcd->self.sysdev = sysdev;
hcd->self.bus_name = bus_name;
timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
INIT_WORK(&hcd->died_work, hcd_died_work);
// set hcd->driver
hcd->driver = driver;
hcd->speed = driver->flags & HCD_MASK;
hcd->product_desc = (driver->product_desc) ? driver->product_desc :
"USB Host Controller";
return hcd;
}
EXPORT_SYMBOL_GPL(__usb_create_hcd);
usb_bus_init
static void usb_bus_init (struct usb_bus *bus)
{
memset (&bus->devmap, 0, sizeof(struct usb_devmap));
bus->devnum_next = 1;
bus->root_hub = NULL;
bus->busnum = -1;
bus->bandwidth_allocated = 0;
bus->bandwidth_int_reqs = 0;
bus->bandwidth_isoc_reqs = 0;
mutex_init(&bus->devnum_next_mutex);
}
usb_add_hcd
//hcd->driver->reset = xhci_plat_setup (filled in by xhci_init_driver)
register the interrupt handler: usb_hcd_request_irqs
register the root hub: register_root_hub
poll the port status registers: usb_hcd_poll_rh_status
/**
* usb_add_hcd - finish generic HCD structure initialization and register
* @hcd: the usb_hcd structure to initialize
* @irqnum: Interrupt line to allocate
* @irqflags: Interrupt type flags
*
* Finish the remaining parts of generic HCD initialization: allocate the
* buffers of consistent memory, register the bus, request the IRQ line,
* and call the driver's reset() and start() routines.
*/
int usb_add_hcd(struct usb_hcd *hcd,
unsigned int irqnum, unsigned long irqflags)
{
int retval;
struct usb_device *rhdev;
struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
if (IS_ERR(hcd->phy_roothub))
return PTR_ERR(hcd->phy_roothub);
retval = usb_phy_roothub_init(hcd->phy_roothub);
if (retval)
return retval;
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST_SS);
if (retval)
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST);
if (retval)
goto err_usb_phy_roothub_power_on;
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
if (retval)
goto err_usb_phy_roothub_power_on;
}
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
switch (authorized_default) {
case USB_AUTHORIZE_NONE:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE;
break;
case USB_AUTHORIZE_ALL:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
break;
case USB_AUTHORIZE_INTERNAL:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL;
break;
case USB_AUTHORIZE_WIRED:
default:
hcd->dev_policy = hcd->wireless ?
USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL;
break;
}
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/* per default all interfaces are authorized */
set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
/* HC is in reset state, but accessible. Now do the one-time init,
* bottom up so that hcds can customize the root hubs before hub_wq
* starts talking to them. (Note, bus id is assigned early too.)
*/
retval = hcd_buffer_create(hcd);
if (retval != 0) {
dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
goto err_create_buf;
}
retval = usb_register_bus(&hcd->self);
if (retval < 0)
goto err_register_bus;
rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
if (rhdev == NULL) {
dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
retval = -ENOMEM;
goto err_allocate_root_hub;
}
mutex_lock(&usb_port_peer_mutex);
hcd->self.root_hub = rhdev;
mutex_unlock(&usb_port_peer_mutex);
rhdev->rx_lanes = 1;
rhdev->tx_lanes = 1;
switch (hcd->speed) {
case HCD_USB11:
rhdev->speed = USB_SPEED_FULL;
break;
case HCD_USB2:
rhdev->speed = USB_SPEED_HIGH;
break;
case HCD_USB25:
rhdev->speed = USB_SPEED_WIRELESS;
break;
case HCD_USB3:
rhdev->speed = USB_SPEED_SUPER;
break;
case HCD_USB32:
rhdev->rx_lanes = 2;
rhdev->tx_lanes = 2;
/* fall through */
case HCD_USB31:
rhdev->speed = USB_SPEED_SUPER_PLUS;
break;
default:
retval = -EINVAL;
goto err_set_rh_speed;
}
/* wakeup flag init defaults to "everything works" for root hubs,
* but drivers can override it in reset() if needed, along with
* recording the overall controller's system wakeup capability.
*/
device_set_wakeup_capable(&rhdev->dev, 1);
/* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is
* registered. But since the controller can die at any time,
* let's initialize the flag before touching the hardware.
*/
set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
/* "reset" is misnamed; its role is now one-time init. the controller
* should already have been reset (and boot firmware kicked off etc).
*/
if (hcd->driver->reset) {
//hcd->driver->reset = xhci_plat_setup
retval = hcd->driver->reset(hcd);
if (retval < 0) {
dev_err(hcd->self.controller, "can't setup: %d\n",
retval);
goto err_hcd_driver_setup;
}
}
hcd->rh_pollable = 1;
retval = usb_phy_roothub_calibrate(hcd->phy_roothub);
if (retval)
goto err_hcd_driver_setup;
/* NOTE: root hub and controller capabilities may not be the same */
if (device_can_wakeup(hcd->self.controller)
&& device_can_wakeup(&hcd->self.root_hub->dev))
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
/* initialize tasklets */
init_giveback_urb_bh(&hcd->high_prio_bh);
init_giveback_urb_bh(&hcd->low_prio_bh);
/* enable irqs just before we start the controller,
* if the BIOS provides legacy PCI irqs.
*/
if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
if (retval)
goto err_request_irq;
}
hcd->state = HC_STATE_RUNNING;
retval = hcd->driver->start(hcd);
if (retval < 0) {
dev_err(hcd->self.controller, "startup error %d\n", retval);
goto err_hcd_driver_start;
}
/* starting here, usbcore will pay attention to the shared HCD roothub */
shared_hcd = hcd->shared_hcd;
if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
retval = register_root_hub(shared_hcd);
if (retval != 0)
goto err_register_root_hub;
if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
usb_hcd_poll_rh_status(shared_hcd);
}
/* starting here, usbcore will pay attention to this root hub */
if (!HCD_DEFER_RH_REGISTER(hcd)) {
retval = register_root_hub(hcd);
if (retval != 0)
goto err_register_root_hub;
if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
usb_hcd_poll_rh_status(hcd);
}
return retval;
err_register_root_hub:
hcd->rh_pollable = 0;
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
hcd->driver->stop(hcd);
hcd->state = HC_STATE_HALT;
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
err_hcd_driver_start:
if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
free_irq(irqnum, hcd);
err_request_irq:
err_hcd_driver_setup:
err_set_rh_speed:
usb_put_invalidate_rhdev(hcd);
err_allocate_root_hub:
usb_deregister_bus(&hcd->self);
err_register_bus:
hcd_buffer_destroy(hcd);
err_create_buf:
usb_phy_roothub_power_off(hcd->phy_roothub);
err_usb_phy_roothub_power_on:
usb_phy_roothub_exit(hcd->phy_roothub);
return retval;
}
EXPORT_SYMBOL_GPL(usb_add_hcd);
xhci_plat_setup
/* called during probe() after chip reset completes */
static int xhci_plat_setup(struct usb_hcd *hcd)
{
int ret;
ret = xhci_priv_init_quirk(hcd);
if (ret)
return ret;
return xhci_gen_setup(hcd, xhci_plat_quirks);
}
xhci_gen_setup
/**
* struct xhci_op_regs - xHCI Host Controller Operational Registers.
* @command: USBCMD - xHC command register
* @status: USBSTS - xHC status register
* @page_size: This indicates the page size that the host controller
* supports. If bit n is set, the HC supports a page size
* of 2^(n+12), up to a 128MB page size.
* 4K is the minimum page size.
* @cmd_ring: CRP - 64-bit Command Ring Pointer
* @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
* @config_reg: CONFIG - Configure Register
* @port_status_base: PORTSCn - base address for Port Status and Control
* Each port has a Port Status and Control register,
* followed by a Port Power Management Status and Control
* register, a Port Link Info register, and a reserved
* register.
* @port_power_base: PORTPMSCn - base address for
* Port Power Management Status and Control
* @port_link_base: PORTLIn - base address for Port Link Info (current
* Link PM state and control) for USB 2.1 and USB 3.0
* devices.
*/
struct xhci_op_regs {
__le32 command;
__le32 status;
__le32 page_size;
__le32 reserved1;
__le32 reserved2;
__le32 dev_notification;
__le64 cmd_ring;
/* rsvd: offset 0x20-2F */
__le32 reserved3[4];
__le64 dcbaa_ptr;
__le32 config_reg;
/* rsvd: offset 0x3C-3FF */
__le32 reserved4[241];
/* port 1 registers, which serve as a base address for other ports */
__le32 port_status_base;
__le32 port_power_base;
__le32 port_link_base;
__le32 reserved5;
/* registers for ports 2-255 */
__le32 reserved6[NUM_PORT_REGS*254];
};
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
struct xhci_hcd *xhci;
/*
* TODO: Check with DWC3 clients for sysdev according to
* quirks
*/
struct device *dev = hcd->self.sysdev;
unsigned int minor_rev;
int retval;
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
/* support to build packet from discontinuous buffers */
hcd->self.no_sg_constraint = 1;
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
xhci = hcd_to_xhci(hcd);
if (usb_hcd_is_primary_hcd(hcd)) {
xhci->main_hcd = hcd;
xhci->usb2_rhub.hcd = hcd;
/* Mark the first roothub as being USB 2.0.
* The xHCI driver will register the USB 3.0 roothub.
*/
hcd->speed = HCD_USB2;
hcd->self.root_hub->speed = USB_SPEED_HIGH;
/*
* USB 2.0 roothub under xHCI has an integrated TT,
* (rate matching hub) as opposed to having an OHCI/UHCI
* companion controller.
*/
hcd->has_tt = 1;
} else {
/*
* Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
* should return 0x31 for sbrn, or that the minor revision
* is a two digit BCD containing minor and sub-minor numbers.
* This was later clarified in xHCI 1.2.
*
* Some USB 3.1 capable hosts therefore have sbrn 0x30, and
* minor revision set to 0x1 instead of 0x10.
*/
if (xhci->usb3_rhub.min_rev == 0x1)
minor_rev = 1;
else
minor_rev = xhci->usb3_rhub.min_rev / 0x10;
switch (minor_rev) {
case 2:
hcd->speed = HCD_USB32;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
hcd->self.root_hub->rx_lanes = 2;
hcd->self.root_hub->tx_lanes = 2;
break;
case 1:
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
break;
}
xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
minor_rev,
minor_rev ? "Enhanced " : "");
xhci->usb3_rhub.hcd = hcd;
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
return 0;
}
mutex_init(&xhci->mutex);
xhci->cap_regs = hcd->regs;
// op_regs starts at cap_regs plus the capability register length
xhci->op_regs = hcd->regs +
HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
xhci->run_regs = hcd->regs +
(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
/* Cache read-only capability registers */
xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
xhci->hci_version = HC_VERSION(xhci->hcc_params);
xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
if (xhci->hci_version > 0x100)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
xhci->quirks |= quirks;
get_quirks(dev, xhci);
/* In xhci controllers which follow xhci 1.0 spec gives a spurious
* success event after a short transfer. This quirk will ignore such
* spurious event.
*/
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
return retval;
xhci_zero_64b_regs(xhci);
xhci_dbg(xhci, "Resetting HCD\n");
/* Reset the internal HC memory state and registers. */
retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
if (retval)
return retval;
xhci_dbg(xhci, "Reset complete\n");
/*
* On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
* of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
* address memory pointers actually. So, this driver clears the AC64
* bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
* DMA_BIT_MASK(32)) in this xhci_gen_setup().
*/
if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
xhci->hcc_params &= ~BIT(0);
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
!dma_set_mask(dev, DMA_BIT_MASK(64))) {
xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
} else {
/*
* This is to avoid error in cases where a 32-bit USB
* controller is used on a 64-bit capable system.
*/
retval = dma_set_mask(dev, DMA_BIT_MASK(32));
if (retval)
return retval;
xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}
xhci_dbg(xhci, "Calling HCD init\n");
/* Initialize HCD and host controller data structures. */
retval = xhci_init(hcd);
if (retval)
return retval;
xhci_dbg(xhci, "Called HCD init\n");
xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
xhci->hcc_params, xhci->hci_version, xhci->quirks);
return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
xhci_init
static int xhci_init(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval = 0;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
if (xhci->hci_version == 0x95 && link_quirk) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Not clearing Link TRB chain bits.");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI doesn't need link TRB QUIRK");
}
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
xhci->quirks |= XHCI_COMP_MODE_QUIRK;
compliance_mode_recovery_timer_init(xhci);
}
return retval;
}
xhci_mem_init
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
int i;
INIT_LIST_HEAD(&xhci->cmd_list);
/* init command timeout work */
INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
init_completion(&xhci->cmd_ring_stop_completion);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size of %iK", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// xHC can handle at most %d device slots.", val);
val2 = readl(&xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting Max device slots reg = 0x%x.", val);
writel(val, &xhci->op_regs->config_reg);
/*
* xHCI section 5.4.6 - doorbell array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
flags);
if (!xhci->dcbaa)
goto fail;
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2112, 64, xhci->page_size);
if (!xhci->segment_pool || !xhci->device_pool)
goto fail;
/* Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
xhci->small_streams_pool =
dma_pool_create("xHCI 256 byte stream ctx arrays",
dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
xhci->medium_streams_pool =
dma_pool_create("xHCI 1KB stream ctx arrays",
dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
* will be allocated with dma_alloc_coherent()
*/
if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
goto fail;
/* Set up the command ring to have one segments for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocated command ring at %p", xhci->cmd_ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%016llx", val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
if (!xhci->lpm_command)
goto fail;
/* Reserve one command ring TRB for disabling LPM.
* Since the USB core grabs the shared usb_bus bandwidth mutex before
* disabling LPM, we only need to reserve one TRB for all devices.
*/
xhci->cmd_ring_reserved_trbs++;
val = readl(&xhci->cap_regs->db_off);
val &= DBOFF_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Doorbell array is located at offset 0x%x"
" from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
if (xhci_event_ring_init(xhci, GFP_KERNEL))
goto fail;
if (xhci_check_trb_in_td_math(xhci) < 0)
goto fail;
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
* See section 5.5.1.2.
*/
for (i = 0; i < MAX_HC_SLOTS; i++)
xhci->devs[i] = NULL;
for (i = 0; i < USB_MAXCHILDREN; i++) {
xhci->usb2_rhub.bus_state.resume_done[i] = 0;
xhci->usb3_rhub.bus_state.resume_done[i] = 0;
/* Only the USB 2.0 completions will ever be used. */
init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
}
if (scratchpad_alloc(xhci, flags))
goto fail;
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
/* Enable USB 3.0 device notifications for function remote wake, which
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
*/
temp = readl(&xhci->op_regs->dev_notification);
temp &= ~DEV_NOTE_MASK;
temp |= DEV_NOTE_FWAKE;
writel(temp, &xhci->op_regs->dev_notification);
return 0;
fail:
xhci_halt(xhci);
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
xhci_setup_port_arrays
/*
* Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
* specify what speeds each port is supposed to be. We can't count on the port
* speed bits in the PORTSC register being correct until a device is connected,
* but we need to set up the two fake roothubs with the correct number of USB
* 3.0 and USB 2.0 ports at host controller initialization time.
*/
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
void __iomem *base;
u32 offset;
unsigned int num_ports;
int i, j;
int cap_count = 0;
u32 cap_start;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
flags, dev_to_node(dev));
if (!xhci->hw_ports)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
// hw_ports[i].addr: ioremapped address of this port's PORTSC register
// (PORTSCn - base address for Port Status and Control)
xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
NUM_PORT_REGS * i;
xhci->hw_ports[i].hw_portnum = i;
}
xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
dev_to_node(dev));
if (!xhci->rh_bw)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bw_table;
INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
bw_table = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
base = &xhci->cap_regs->hc_capbase;
cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
if (!cap_start) {
xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
return -ENODEV;
}
offset = cap_start;
/* count extended protocol capability entries for later caching */
while (offset) {
cap_count++;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
}
xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
flags, dev_to_node(dev));
if (!xhci->ext_caps)
return -ENOMEM;
xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
flags, dev_to_node(dev));
if (!xhci->port_caps)
return -ENOMEM;
offset = cap_start;
while (offset) {
xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
num_ports)
break;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
}
if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 3.0 roothub ports to %u.",
USB_SS_MAXPORTS);
xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
}
if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
}
/*
* Note we could have all USB 3.0 ports, or all USB 2.0 ports.
* Not sure how the USB core will handle a hub with no ports...
*/
// assign the hw ports to xhci->usb2_rhub / usb3_rhub and record each rhub port's register address
xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
return 0;
}
xhci_create_rhub_port_array
static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
struct xhci_hub *rhub, gfp_t flags)
{
int port_index = 0;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!rhub->num_ports)
return;
rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
dev_to_node(dev));
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
continue;
xhci->hw_ports[i].hcd_portnum = port_index;
// expose &xhci->hw_ports[i] through rhub->ports[]
rhub->ports[port_index] = &xhci->hw_ports[i];
port_index++;
if (port_index == rhub->num_ports)
break;
}
}
usb_hcd_request_irqs
static int usb_hcd_request_irqs(struct usb_hcd *hcd,
unsigned int irqnum, unsigned long irqflags)
{
int retval;
if (hcd->driver->irq) {
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
// register the interrupt handler usb_hcd_irq
retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
hcd->irq_descr, hcd);
if (retval != 0) {
dev_err(hcd->self.controller,
"request interrupt %d failed\n",
irqnum);
return retval;
}
hcd->irq = irqnum;
dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
(hcd->driver->flags & HCD_MEMORY) ?
"io mem" : "io base",
(unsigned long long)hcd->rsrc_start);
} else {
hcd->irq = 0;
if (hcd->rsrc_start)
dev_info(hcd->self.controller, "%s 0x%08llx\n",
(hcd->driver->flags & HCD_MEMORY) ?
"io mem" : "io base",
(unsigned long long)hcd->rsrc_start);
}
return 0;
}
Polling the host's port status registers
/*
* Root Hub interrupt transfers are polled using a timer if the
* driver requests it; otherwise the driver is responsible for
* calling usb_hcd_poll_rh_status() when an event occurs.
*
* Completions are called in_interrupt(), but they may or may not
* be in_irq().
*/
void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
{
struct urb *urb;
int length;
int status;
unsigned long flags;
char buffer[6]; /* Any root hubs with > 31 ports? */
if (unlikely(!hcd->rh_pollable))
return;
if (!hcd->uses_new_polling && !hcd->status_urb)
return;
length = hcd->driver->hub_status_data(hcd, buffer);
if (length > 0) {
/* try to complete the status urb */
spin_lock_irqsave(&hcd_root_hub_lock, flags);
urb = hcd->status_urb;
if (urb) {
clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
hcd->status_urb = NULL;
if (urb->transfer_buffer_length >= length) {
status = 0;
} else {
status = -EOVERFLOW;
length = urb->transfer_buffer_length;
}
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
usb_hcd_unlink_urb_from_ep(hcd, urb);
usb_hcd_giveback_urb(hcd, urb, status);
} else {
length = 0;
set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
}
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
}
/* The USB 2.0 spec says 256 ms. This is close enough and won't
* exceed that limit if HZ is 100. The math is more clunky than
* maybe expected, this is to make sure that all timers for USB devices
* fire at the same time to give the CPU a break in between */
if (hcd->uses_new_polling ? HCD_POLL_RH(hcd) :
(length == 0 && hcd->status_urb != NULL))
mod_timer (&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
}
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
xhci_hub_status_data: check the USB port status registers
/*
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
* Ports are 0-indexed from the HCD point of view,
* and 1-indexed from the USB core point of view.
*
* Note that the status change bits will be cleared as soon as a port status
* change event is generated, so we use the saved status from that event.
*/
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
unsigned long flags;
u32 temp, status;
u32 mask;
int i, retval;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports;
struct xhci_bus_state *bus_state;
bool reset_change = false;
struct xhci_hub *rhub;
struct xhci_port **ports;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
/* Initial status is no changes */
retval = (max_ports + 8) / 8;
memset(buf, 0, retval);
/*
* Inform the usbcore about resume-in-progress by returning
* a non-zero value even if there are no status changes.
*/
spin_lock_irqsave(&xhci->lock, flags);
status = bus_state->resuming_ports;
/*
* SS devices are only visible to roothub after link training completes.
* Keep polling roothubs for a grace period after xHC start
*/
if (xhci->run_graceperiod) {
if (time_before(jiffies, xhci->run_graceperiod))
status = 1;
else
xhci->run_graceperiod = 0;
}
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
// read this port's PORTSC register (hw_ports[i].addr)
temp = readl(ports[i]->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
trace_xhci_hub_status_data(i, temp);
if ((temp & mask) != 0 ||
(bus_state->port_c_suspend & 1 << i) ||
(bus_state->resume_done[i] && time_after_eq(
jiffies, bus_state->resume_done[i]))) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
if ((temp & PORT_RC))
reset_change = true;
if (temp & PORT_OC)
status = 1;
}
if (!status && !reset_change) {
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
}
spin_unlock_irqrestore(&xhci->lock, flags);
return status ? retval : 0;
}
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
.hcd_priv_size = sizeof(struct xhci_hcd),
/*
* generic hardware linkage
*/
.irq = xhci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
/*
* basic lifecycle operations
*/
.reset = NULL, /* set in xhci_init_driver() */
.start = xhci_run,
.stop = xhci_stop,
.shutdown = xhci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.map_urb_for_dma = xhci_map_urb_for_dma,
.urb_enqueue = xhci_urb_enqueue,
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
.free_dev = xhci_free_dev,
.alloc_streams = xhci_alloc_streams,
.free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
.endpoint_disable = xhci_endpoint_disable,
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.enable_device = xhci_enable_device,
.update_hub_device = xhci_update_hub_device,
.reset_device = xhci_discover_or_reset_device,
/*
* scheduling support
*/
.get_frame_number = xhci_get_frame,
/*
* root hub support
*/
.hub_control = xhci_hub_control,
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
.get_resuming_ports = xhci_get_resuming_ports,
/*
* call back when device connected and addressed
*/
.update_device = xhci_update_device,
.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
.find_raw_port_number = xhci_find_raw_port_number,
.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
};
usb_hcd_irq: HCD interrupt handling
/**
* usb_hcd_irq - hook IRQs to HCD framework (bus glue)
* @irq: the IRQ being raised
* @__hcd: pointer to the HCD whose IRQ is being signaled
*
* If the controller isn't HALTed, calls the driver's irq handler.
* Checks whether the controller is now dead.
*
* Return: %IRQ_HANDLED if the IRQ was handled. %IRQ_NONE otherwise.
*/
irqreturn_t usb_hcd_irq (int irq, void *__hcd)
{
struct usb_hcd *hcd = __hcd;
irqreturn_t rc;
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
//xhci_irq
else if (hcd->driver->irq(hcd) == IRQ_NONE)
rc = IRQ_NONE;
else
rc = IRQ_HANDLED;
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_irq);
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
*/
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
u64 temp_64;
u32 status;
int event_loop = 0;
spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
// read the USBSTS operational register
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
ret = IRQ_HANDLED;
goto out;
}
if (!(status & STS_EINT))
goto out;
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
ret = IRQ_HANDLED;
goto out;
}
/*
* Clear the op reg interrupt status first,
* so we can receive interrupts from other MSI-X interrupters.
* Write 1 to clear the interrupt status.
*/
// clear the interrupt status bit (write 1 to clear)
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
// without MSI, also acknowledge the interrupter's pending bit (IMAN.IP)
if (!hcd->msi_enabled) {
u32 irq_pending;
irq_pending = readl(&xhci->ir_set->irq_pending);
irq_pending |= IMAN_IP;
writel(irq_pending, &xhci->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING ||
xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
"Shouldn't IRQs be disabled?\n");
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
xhci_write_64(xhci, temp_64 | ERST_EHB,
&xhci->ir_set->erst_dequeue);
ret = IRQ_HANDLED;
goto out;
}
event_ring_deq = xhci->event_ring->dequeue;
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
while (xhci_handle_event(xhci) > 0) {
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, event_ring_deq);
event_ring_deq = xhci->event_ring->dequeue;
event_loop = 0;
}
xhci_update_erst_dequeue(xhci, event_ring_deq);
ret = IRQ_HANDLED;
out:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
xhci_handle_event
/*
* This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 if done. In future, <0 returns should indicate error code.
*/
static int xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
int ret;
/* Event ring hasn't been allocated yet. */
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci_err(xhci, "ERROR event ring not ready\n");
return -ENOMEM;
}
event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
xhci->event_ring->cycle_state)
return 0;
trace_xhci_handle_event(xhci->event_ring, &event->generic);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
* speculative reads of the event's flags/data below.
*/
rmb();
/* FIXME: Handle more event types. */
switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_COMPLETION):
handle_cmd_completion(xhci, &event->event_cmd);
break;
case TRB_TYPE(TRB_PORT_STATUS):
handle_port_status(xhci, event);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
ret = handle_tx_event(xhci, &event->trans_event);
if (ret >= 0)
update_ptrs = 0;
break;
case TRB_TYPE(TRB_DEV_NOTE):
handle_device_notification(xhci, event);
break;
default:
if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
TRB_TYPE(48))
handle_vendor_event(xhci, event);
else
xhci_warn(xhci, "ERROR unknown event type %d\n",
TRB_FIELD_TO_TYPE(
le32_to_cpu(event->event_cmd.flags)));
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "xHCI host dying, returning from "
"event handler.\n");
return 0;
}
if (update_ptrs)
/* Update SW event ring dequeue pointer */
inc_deq(xhci, xhci->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
*/
return 1;
}